Repository: groops-devs/groops
Branch: main
Commit: 54351e0dc4dc
Files: 1650
Total size: 14.8 MB
Directory structure:
gitextract_2kegjav5/
├── .github/
│ ├── CODE_OF_CONDUCT.md
│ └── ISSUE_TEMPLATE/
│ ├── bug_report.yml
│ ├── config.yml
│ └── feature_request.yml
├── .gitignore
├── CHANGELOG.md
├── CITATION.bib
├── CONTRIBUTING.md
├── INSTALL.md
├── LICENSE
├── README.md
├── bin/
│ └── .gitignore
├── docs/
│ ├── documentation.html
│ ├── documentationSource.html
│ ├── html/
│ │ ├── Accelerometer2GraceL1b.html
│ │ ├── AutoregressiveModel2CovarianceMatrix.html
│ │ ├── BerneseKinematic2Orbit.html
│ │ ├── Champ2AccStar.html
│ │ ├── Champ2Orbit.html
│ │ ├── Cosmic2OrbitStar.html
│ │ ├── CovarianceFunction2DigitalFilter.html
│ │ ├── CovarianceFunction2PowerSpectralDensity.html
│ │ ├── CovarianceMatrix2AutoregressiveModel.html
│ │ ├── CovarianceMatrix2Correlation.html
│ │ ├── Cpf2Orbit.html
│ │ ├── Crd2NormalPoints.html
│ │ ├── Cstg2NormalPoints.html
│ │ ├── DigitalFilter2FrequencyResponse.html
│ │ ├── DigitalFilter2ImpulseResponse.html
│ │ ├── DoodsonAdmittance2SupplementaryFiles.html
│ │ ├── DoodsonAdmittanceInterpolation.html
│ │ ├── DoodsonAdmittanceTimeSeries.html
│ │ ├── DoodsonArguments2TimeSeries.html
│ │ ├── DoodsonHarmonics2GriddedAmplitudeAndPhase.html
│ │ ├── DoodsonHarmonics2IersPotential.html
│ │ ├── DoodsonHarmonics2IersWaterHeight.html
│ │ ├── DoodsonHarmonics2PotentialCoefficients.html
│ │ ├── DoodsonHarmonicsCalculateAdmittance.html
│ │ ├── DoodsonHarmonicsChangePartialTides.html
│ │ ├── EarthOrientationParameterTimeSeries.html
│ │ ├── EarthRotaryVectorTimeSeries.html
│ │ ├── EclipseFactor2GriddedData.html
│ │ ├── EnsembleAveragingScaleModel.html
│ │ ├── FileConvert.html
│ │ ├── FileCreateDirectories.html
│ │ ├── FileMove.html
│ │ ├── FileRemove.html
│ │ ├── FileTextCreate.html
│ │ ├── FilterMatrixWindowedPotentialCoefficients.html
│ │ ├── FunctionsCalculate.html
│ │ ├── GnssAntennaDefinition2ParameterVector.html
│ │ ├── GnssAntennaDefinition2Skyplot.html
│ │ ├── GnssAntennaDefinitionCreate.html
│ │ ├── GnssAntennaNormalsConstraint.html
│ │ ├── GnssAntex2AntennaDefinition.html
│ │ ├── GnssAttitude2Orbex.html
│ │ ├── GnssAttitudeInfoCreate.html
│ │ ├── GnssBiasClockAlignment.html
│ │ ├── GnssClock2ClockRinex.html
│ │ ├── GnssClockRinex2InstrumentClock.html
│ │ ├── GnssEop2IgsErp.html
│ │ ├── GnssEstimateClockShift.html
│ │ ├── GnssGlonassFrequencyNumberUpdate.html
│ │ ├── GnssGriddedDataTimeSeries2Ionex.html
│ │ ├── GnssIonex2GriddedDataTimeSeries.html
│ │ ├── GnssNormals2Sinex.html
│ │ ├── GnssOrbex2StarCamera.html
│ │ ├── GnssPrn2SvnBlockVariables.html
│ │ ├── GnssProcessing.html
│ │ ├── GnssReceiver2RinexObservation.html
│ │ ├── GnssReceiverDefinitionCreate.html
│ │ ├── GnssResiduals2AccuracyDefinition.html
│ │ ├── GnssResiduals2Skyplot.html
│ │ ├── GnssResiduals2TransmitterAccuracyDefinition.html
│ │ ├── GnssRinexNavigation2OrbitClock.html
│ │ ├── GnssSignalBias2Matrix.html
│ │ ├── GnssSignalBias2SinexBias.html
│ │ ├── GnssSimulateReceiver.html
│ │ ├── GnssSinexBias2SignalBias.html
│ │ ├── GnssStationInfoCreate.html
│ │ ├── GnssStationLog2Platform.html
│ │ ├── GnssTroposphere2TropoSinex.html
│ │ ├── GoceXml2Gradiometer.html
│ │ ├── GoceXml2Orbit.html
│ │ ├── GoceXml2StarCamera.html
│ │ ├── GoceXmlEggNom1b.html
│ │ ├── Grace2PotentialCoefficients.html
│ │ ├── GraceAccelerometer2L1bAscii.html
│ │ ├── GraceAntennaCenterCorrectionArcCovariance.html
│ │ ├── GraceAod2DoodsonHarmonics.html
│ │ ├── GraceAod2TimeSplines.html
│ │ ├── GraceCoefficients2BlockMeanTimeSplines.html
│ │ ├── GraceL1a2Accelerometer.html
│ │ ├── GraceL1a2SatelliteTracking.html
│ │ ├── GraceL1a2StarCamera.html
│ │ ├── GraceL1a2Temperature.html
│ │ ├── GraceL1b2AccHousekeeping.html
│ │ ├── GraceL1b2Accelerometer.html
│ │ ├── GraceL1b2ClockOffset.html
│ │ ├── GraceL1b2GnssReceiver.html
│ │ ├── GraceL1b2Magnetometer.html
│ │ ├── GraceL1b2Mass.html
│ │ ├── GraceL1b2Orbit.html
│ │ ├── GraceL1b2SatelliteTracking.html
│ │ ├── GraceL1b2StarCamera.html
│ │ ├── GraceL1b2StarCameraCovariance.html
│ │ ├── GraceL1b2SteeringMirror.html
│ │ ├── GraceL1b2Thruster.html
│ │ ├── GraceL1b2TimeOffset.html
│ │ ├── GraceL1b2Uso.html
│ │ ├── GraceL1b2Vector.html
│ │ ├── GraceOrbit2TransplantTimeOffset.html
│ │ ├── GraceSequenceOfEvents.html
│ │ ├── GraceSstResidualAnalysis.html
│ │ ├── GraceSstScaleModel.html
│ │ ├── GraceSstSpecialEvents.html
│ │ ├── GraceThrusterResponse2Accelerometer.html
│ │ ├── GrailCdr2Orbit.html
│ │ ├── GrailCdr2SatelliteTracking.html
│ │ ├── GrailCdr2StarCamera.html
│ │ ├── Gravityfield2AbsoluteGravity.html
│ │ ├── Gravityfield2AreaMeanTimeSeries.html
│ │ ├── Gravityfield2Deflections.html
│ │ ├── Gravityfield2DegreeAmplitudes.html
│ │ ├── Gravityfield2DegreeAmplitudesPlotGrid.html
│ │ ├── Gravityfield2DisplacementTimeSeries.html
│ │ ├── Gravityfield2EmpiricalCovariance.html
│ │ ├── Gravityfield2Gradients.html
│ │ ├── Gravityfield2GravityVector.html
│ │ ├── Gravityfield2GridCovarianceMatrix.html
│ │ ├── Gravityfield2GriddedData.html
│ │ ├── Gravityfield2GriddedDataTimeSeries.html
│ │ ├── Gravityfield2PotentialCoefficients.html
│ │ ├── Gravityfield2PotentialCoefficientsTimeSeries.html
│ │ ├── Gravityfield2SphericalHarmonicsVector.html
│ │ ├── Gravityfield2TimeSplines.html
│ │ ├── Gravityfield2TrendPotentialCoefficients.html
│ │ ├── GravityfieldCovariancesPropagation2GriddedData.html
│ │ ├── GravityfieldReplacePotentialCoefficients.html
│ │ ├── GravityfieldVariancesPropagation2GriddedData.html
│ │ ├── GridRectangular2NetCdf.html
│ │ ├── GriddedData2AreaMeanTimeSeries.html
│ │ ├── GriddedData2GriddedDataStatistics.html
│ │ ├── GriddedData2GriddedDataTimeSeries.html
│ │ ├── GriddedData2Matrix.html
│ │ ├── GriddedData2NetCdf.html
│ │ ├── GriddedData2PotentialCoefficients.html
│ │ ├── GriddedData2SphericalDistance.html
│ │ ├── GriddedData2TimeSeries.html
│ │ ├── GriddedDataCalculate.html
│ │ ├── GriddedDataConcatenate.html
│ │ ├── GriddedDataCreate.html
│ │ ├── GriddedDataInterpolate.html
│ │ ├── GriddedDataReduceSampling.html
│ │ ├── GriddedDataTimeSeries2GriddedData.html
│ │ ├── GriddedDataTimeSeries2NetCdf.html
│ │ ├── GriddedDataTimeSeries2PotentialCoefficients.html
│ │ ├── GriddedTopography2AtmospherePotentialCoefficients.html
│ │ ├── GriddedTopography2PotentialCoefficients.html
│ │ ├── GriddedTopographyEllipsoidal2Radial.html
│ │ ├── GroopsAscii2Orbit.html
│ │ ├── GroupPrograms.html
│ │ ├── Grs2PotentialCoefficients.html
│ │ ├── Hw2TideGeneratingPotential.html
│ │ ├── Icgem2PotentialCoefficients.html
│ │ ├── Iers2OceanPoleTide.html
│ │ ├── IersC04IAU2000EarthOrientationParameter.html
│ │ ├── IersHighFrequentEop2DoodsonEop.html
│ │ ├── IersPotential2DoodsonHarmonics.html
│ │ ├── IersRapidIAU2000EarthOrientationParameter.html
│ │ ├── IersWaterHeight2DoodsonHarmonics.html
│ │ ├── IfPrograms.html
│ │ ├── Igs2EarthOrientationParameter.html
│ │ ├── Instrument2AllanVariance.html
│ │ ├── Instrument2CovarianceFunctionVCE.html
│ │ ├── Instrument2CrossCorrelationFunction.html
│ │ ├── Instrument2Histogram.html
│ │ ├── Instrument2PowerSpectralDensity.html
│ │ ├── Instrument2RmsPlotGrid.html
│ │ ├── Instrument2Scaleogram.html
│ │ ├── Instrument2SpectralCoherence.html
│ │ ├── Instrument2Spectrogram.html
│ │ ├── InstrumentAccelerometer2ThermosphericDensity.html
│ │ ├── InstrumentAccelerometerApplyEstimatedParameters.html
│ │ ├── InstrumentAccelerometerEstimateBiasScale.html
│ │ ├── InstrumentAccelerometerEstimateParameters.html
│ │ ├── InstrumentApplyTimeOffset.html
│ │ ├── InstrumentArcCalculate.html
│ │ ├── InstrumentArcCrossStatistics.html
│ │ ├── InstrumentArcStatistics.html
│ │ ├── InstrumentConcatenate.html
│ │ ├── InstrumentCovarianceCheck.html
│ │ ├── InstrumentDetrend.html
│ │ ├── InstrumentEarthRotation.html
│ │ ├── InstrumentEstimateEmpiricalCovariance.html
│ │ ├── InstrumentEstimateHelmertTransformation.html
│ │ ├── InstrumentFilter.html
│ │ ├── InstrumentGnssReceiver2TimeSeries.html
│ │ ├── InstrumentInsertNAN.html
│ │ ├── InstrumentMultiplyAdd.html
│ │ ├── InstrumentReduceSampling.html
│ │ ├── InstrumentRemoveEpochsByCriteria.html
│ │ ├── InstrumentRemoveEpochsByTimes.html
│ │ ├── InstrumentRemoveEpochsThruster.html
│ │ ├── InstrumentResample.html
│ │ ├── InstrumentRotate.html
│ │ ├── InstrumentSatelliteTrackingAntennaCenterCorrection.html
│ │ ├── InstrumentSetType.html
│ │ ├── InstrumentStarCamera2AccAngularRate.html
│ │ ├── InstrumentStarCamera2RollPitchYaw.html
│ │ ├── InstrumentStarCamera2RotaryMatrix.html
│ │ ├── InstrumentStarCameraAngularAccelerometerFusion.html
│ │ ├── InstrumentStarCameraMultiply.html
│ │ ├── InstrumentStatisticsTimeSeries.html
│ │ ├── InstrumentSynchronize.html
│ │ ├── InstrumentWaveletDecomposition.html
│ │ ├── Jason2Starcamera.html
│ │ ├── JplAscii2Ephemerides.html
│ │ ├── KalmanBuildNormals.html
│ │ ├── KalmanFilter.html
│ │ ├── KalmanSmoother.html
│ │ ├── KalmanSmootherLeastSquares.html
│ │ ├── Kaula2SigmaPotentialCoefficients.html
│ │ ├── Kernel2Coefficients.html
│ │ ├── Kernel2SigmaPotentialCoefficients.html
│ │ ├── KernelEvaluate.html
│ │ ├── LocalLevelFrame2StarCamera.html
│ │ ├── LoopPrograms.html
│ │ ├── MagneticField2GriddedData.html
│ │ ├── Matrix2GriddedData.html
│ │ ├── MatrixCalculate.html
│ │ ├── MatrixRectangular2GriddedData.html
│ │ ├── Merit2FullRate.html
│ │ ├── Merit2NormalPoints.html
│ │ ├── ModelEquilibriumTide.html
│ │ ├── NetCdf2GridRectangular.html
│ │ ├── NetCdf2GriddedData.html
│ │ ├── NetCdf2GriddedDataTimeSeries.html
│ │ ├── NetCdfInfo.html
│ │ ├── NoiseAccelerometer.html
│ │ ├── NoiseGriddedData.html
│ │ ├── NoiseInstrument.html
│ │ ├── NoiseNormalsSolution.html
│ │ ├── NoiseOrbit.html
│ │ ├── NoiseSatelliteTracking.html
│ │ ├── NoiseStarCamera.html
│ │ ├── NoiseTimeSeries.html
│ │ ├── NormalsAccumulate.html
│ │ ├── NormalsBuild.html
│ │ ├── NormalsBuildShortTimeStaticLongTime.html
│ │ ├── NormalsCreate.html
│ │ ├── NormalsEliminate.html
│ │ ├── NormalsMultiplyAdd.html
│ │ ├── NormalsRegularizationBorders.html
│ │ ├── NormalsRegularizationSphericalHarmonics.html
│ │ ├── NormalsReorder.html
│ │ ├── NormalsReorderAndAccumulate.html
│ │ ├── NormalsScale.html
│ │ ├── NormalsSolverVCE.html
│ │ ├── NormalsSphericalHarmonics2Sinex.html
│ │ ├── NormalsTemporalCombination.html
│ │ ├── ObservationEquations2Files.html
│ │ ├── OceanTidesDTU2GriddedData.html
│ │ ├── Orbit2ArgumentOfLatitude.html
│ │ ├── Orbit2BetaPrimeAngle.html
│ │ ├── Orbit2Cpf.html
│ │ ├── Orbit2EarthFixedOrbit.html
│ │ ├── Orbit2EclipseFactor.html
│ │ ├── Orbit2GraceL1b.html
│ │ ├── Orbit2GroopsAscii.html
│ │ ├── Orbit2Groundtracks.html
│ │ ├── Orbit2Kepler.html
│ │ ├── Orbit2MagneticField.html
│ │ ├── Orbit2Sp3Format.html
│ │ ├── Orbit2ThermosphericState.html
│ │ ├── OrbitAddVelocityAndAcceleration.html
│ │ ├── ParameterNamesCreate.html
│ │ ├── ParameterSelection2IndexVector.html
│ │ ├── ParameterVector2GnssAntennaDefinition.html
│ │ ├── PlanetOrbit.html
│ │ ├── PlatformCreate.html
│ │ ├── PlotDegreeAmplitudes.html
│ │ ├── PlotGraph.html
│ │ ├── PlotMap.html
│ │ ├── PlotMatrix.html
│ │ ├── PlotSphericalHarmonicsTriangle.html
│ │ ├── PotentialCoefficients2BlockMeanTimeSplines.html
│ │ ├── PotentialCoefficients2DegreeAmplitudes.html
│ │ ├── PotentialCoefficients2DoodsonHarmonics.html
│ │ ├── PotentialCoefficients2Icgem.html
│ │ ├── PowerSpectralDensity2CovarianceFunction.html
│ │ ├── PreprocessingDualSst.html
│ │ ├── PreprocessingGradiometer.html
│ │ ├── PreprocessingPod.html
│ │ ├── PreprocessingSst.html
│ │ ├── PreprocessingVariationalEquation.html
│ │ ├── PreprocessingVariationalEquationOrbitFit.html
│ │ ├── PreprocessingVariationalEquationSstFit.html
│ │ ├── PsmslOceanBottomPressure2TimeSeries.html
│ │ ├── RadialBasisSplines2KernelCoefficients.html
│ │ ├── RinexObservation2GnssReceiver.html
│ │ ├── RunCommand.html
│ │ ├── Sacc2Orbit.html
│ │ ├── SatelliteModelCreate.html
│ │ ├── SatelliteTracking2GraceL1b.html
│ │ ├── Sentinel2StarCamera.html
│ │ ├── SentinelXml2Orbit.html
│ │ ├── SimulateAccelerometer.html
│ │ ├── SimulateAccelerometerCoMOffset.html
│ │ ├── SimulateGradiometer.html
│ │ ├── SimulateKeplerOrbit.html
│ │ ├── SimulateOrbit.html
│ │ ├── SimulateSatelliteTracking.html
│ │ ├── SimulateStarCamera.html
│ │ ├── SimulateStarCameraGnss.html
│ │ ├── SimulateStarCameraGrace.html
│ │ ├── SimulateStarCameraSentinel1.html
│ │ ├── SimulateStarCameraTerrasar.html
│ │ ├── Sinex2Normals.html
│ │ ├── Sinex2StationDiscontinuities.html
│ │ ├── Sinex2StationPosition.html
│ │ ├── Sinex2StationPositions.html
│ │ ├── Sinex2StationPostSeismicDeformation.html
│ │ ├── SinexEccentricties2SlrPlatform.html
│ │ ├── SinexMetadata2GlonassFrequencyNumber.html
│ │ ├── SinexMetadata2SatelliteModel.html
│ │ ├── SlrComModel2RangeBiasStationSatellite.html
│ │ ├── SlrProcessing.html
│ │ ├── SlrSinexDataHandling2Files.html
│ │ ├── Sp3Format2Orbit.html
│ │ ├── StarCamera2GraceL1b.html
│ │ ├── StarCamera2Orbex.html
│ │ ├── Swarm2Starcamera.html
│ │ ├── SynthesisSphericalHarmonicsMatrix.html
│ │ ├── TemporalRepresentation2TimeSeries.html
│ │ ├── TerraSarTandem2Orbit.html
│ │ ├── TerraSarTandem2StarCamera.html
│ │ ├── ThermosphericState2GriddedData.html
│ │ ├── TimeSeries2GriddedData.html
│ │ ├── TimeSeries2PotentialCoefficients.html
│ │ ├── TimeSeriesCreate.html
│ │ ├── Tle2Orbit.html
│ │ ├── Variational2OrbitAndStarCamera.html
│ │ ├── ViennaMappingFunctionGrid2File.html
│ │ ├── ViennaMappingFunctionStation2File.html
│ │ ├── autoregressiveModelSequenceType.html
│ │ ├── borderType.html
│ │ ├── classes.html
│ │ ├── conditionType.html
│ │ ├── cookbook.gnssNetwork.html
│ │ ├── cookbook.gnssPpp.html
│ │ ├── cookbook.gravityFieldGrace.html
│ │ ├── cookbook.gravityFieldPod.html
│ │ ├── cookbook.instrument.html
│ │ ├── cookbook.kinematicOrbit.html
│ │ ├── cookbook.regionalGeoid.html
│ │ ├── covariancePodType.html
│ │ ├── covarianceSstType.html
│ │ ├── digitalFilterType.html
│ │ ├── documentationSearchIndex.js
│ │ ├── doodson.html
│ │ ├── earthRotationType.html
│ │ ├── eclipseType.html
│ │ ├── ephemeridesType.html
│ │ ├── fileFormat_admittance.html
│ │ ├── fileFormat_arcList.html
│ │ ├── fileFormat_doodsonEarthOrientationParameter.html
│ │ ├── fileFormat_doodsonHarmonic.html
│ │ ├── fileFormat_earthOrientationParameter.html
│ │ ├── fileFormat_earthTide.html
│ │ ├── fileFormat_ephemerides.html
│ │ ├── fileFormat_gnssAntennaDefinition.html
│ │ ├── fileFormat_gnssReceiverDefinition.html
│ │ ├── fileFormat_gnssSignalBias.html
│ │ ├── fileFormat_griddedData.html
│ │ ├── fileFormat_griddedDataTimeSeries.html
│ │ ├── fileFormat_instrument.html
│ │ ├── fileFormat_matrix.html
│ │ ├── fileFormat_meanPolarMotion.html
│ │ ├── fileFormat_normalEquation.html
│ │ ├── fileFormat_oceanPoleTide.html
│ │ ├── fileFormat_parameterName.html
│ │ ├── fileFormat_platform.html
│ │ ├── fileFormat_polygon.html
│ │ ├── fileFormat_potentialCoefficients.html
│ │ ├── fileFormat_satelliteModel.html
│ │ ├── fileFormat_stringList.html
│ │ ├── fileFormat_stringTable.html
│ │ ├── fileFormat_tideGeneratingPotential.html
│ │ ├── fileFormat_timeSplinesCovariance.html
│ │ ├── fileFormat_timeSplinesGravityField.html
│ │ ├── fileFormat_variationalEquation.html
│ │ ├── forcesType.html
│ │ ├── fundamentals.autoregressiveModel.html
│ │ ├── fundamentals.basisSplines.html
│ │ ├── fundamentals.robustLeastSquares.html
│ │ ├── general.configFiles.html
│ │ ├── general.constants.html
│ │ ├── general.fileFormat.html
│ │ ├── general.gui.html
│ │ ├── general.loopsAndConditions.html
│ │ ├── general.parallelization.html
│ │ ├── general.parser.html
│ │ ├── gnssAntennaDefintionListType.html
│ │ ├── gnssParametrizationType.html
│ │ ├── gnssProcessingStepType.html
│ │ ├── gnssReceiverGeneratorType.html
│ │ ├── gnssTransmitterGeneratorType.html
│ │ ├── gnssType.html
│ │ ├── gravityfieldType.html
│ │ ├── gridType.html
│ │ ├── index.html
│ │ ├── instrumentTypeType.html
│ │ ├── interpolatorTimeSeriesType.html
│ │ ├── kernelType.html
│ │ ├── loopType.html
│ │ ├── magnetosphereType.html
│ │ ├── matrixGeneratorType.html
│ │ ├── miscAccelerationsType.html
│ │ ├── noiseGeneratorType.html
│ │ ├── normalEquationType.html
│ │ ├── observationType.html
│ │ ├── orbitPropagatorType.html
│ │ ├── parameterNamesType.html
│ │ ├── parameterSelectorType.html
│ │ ├── parametrizationAccelerationType.html
│ │ ├── parametrizationGnssAntennaType.html
│ │ ├── parametrizationGravityType.html
│ │ ├── parametrizationSatelliteTrackingType.html
│ │ ├── parametrizationTemporalType.html
│ │ ├── planetType.html
│ │ ├── platformSelectorType.html
│ │ ├── plotAxisType.html
│ │ ├── plotColorType.html
│ │ ├── plotColorbarType.html
│ │ ├── plotGraphLayerType.html
│ │ ├── plotLegendType.html
│ │ ├── plotLineType.html
│ │ ├── plotMapLayerType.html
│ │ ├── plotMapProjectionType.html
│ │ ├── plotSymbolType.html
│ │ ├── podRightSideType.html
│ │ ├── programType.html
│ │ ├── search.html
│ │ ├── sggRightSideType.html
│ │ ├── slrParametrizationType.html
│ │ ├── slrProcessingStepType.html
│ │ ├── slrSatelliteGeneratorType.html
│ │ ├── slrStationGeneratorType.html
│ │ ├── sphericalHarmonicsFilterType.html
│ │ ├── sphericalHarmonicsNumberingType.html
│ │ ├── sstRightSideType.html
│ │ ├── static/
│ │ │ ├── groops.css
│ │ │ ├── searchtools.js
│ │ │ └── template.html
│ │ ├── thermosphereType.html
│ │ ├── tidesType.html
│ │ ├── timeSeriesType.html
│ │ └── troposphereType.html
│ ├── index.html
│ ├── latex/
│ │ ├── .gitignore
│ │ ├── classes.auto.tex
│ │ ├── cookbook.gnssNetwork.tex
│ │ ├── cookbook.gnssPpp.tex
│ │ ├── cookbook.gravityFieldGrace.tex
│ │ ├── cookbook.gravityFieldPod.tex
│ │ ├── cookbook.instrument.tex
│ │ ├── cookbook.kinematicOrbit.tex
│ │ ├── cookbook.regionalGeoid.tex
│ │ ├── documentation.tex
│ │ ├── fundamentals.autoregressiveModel.tex
│ │ ├── fundamentals.basisSplines.tex
│ │ ├── fundamentals.robustLeastSquares.tex
│ │ ├── general.configFiles.tex
│ │ ├── general.constants.tex
│ │ ├── general.fileFormat.auto.tex
│ │ ├── general.fileFormat.tex
│ │ ├── general.gui.tex
│ │ ├── general.loopsAndConditions.tex
│ │ ├── general.parallelization.tex
│ │ ├── general.parser.auto.tex
│ │ ├── general.parser.tex
│ │ └── programs.auto.tex
│ ├── makeDocumentation.sh
│ └── source/
│ ├── .gitignore
│ └── Doxyfile
├── groops.xsd
├── gui/
│ ├── .gitignore
│ ├── addVariableDialog/
│ │ ├── addVariableDialog.cpp
│ │ ├── addVariableDialog.h
│ │ └── addVariableDialog.ui
│ ├── base/
│ │ ├── importGroops.h
│ │ ├── schema.cpp
│ │ ├── schema.h
│ │ ├── xml.cpp
│ │ └── xml.h
│ ├── executeDialog/
│ │ ├── executeDialog.cpp
│ │ ├── executeDialog.h
│ │ └── executeDialog.ui
│ ├── findReplaceDock/
│ │ ├── findReplaceDock.cpp
│ │ ├── findReplaceDock.h
│ │ └── findReplaceDock.ui
│ ├── groopsGui.pro
│ ├── main.cpp
│ ├── mainWindow/
│ │ ├── mainWindow.cpp
│ │ ├── mainWindow.h
│ │ ├── mainWindow.ui
│ │ ├── schemaSelector.cpp
│ │ ├── schemaSelector.h
│ │ ├── schemaSelector.ui
│ │ ├── sideBar.cpp
│ │ ├── sideBar.h
│ │ ├── tabs.cpp
│ │ └── tabs.h
│ ├── programDialog/
│ │ ├── programDialog.cpp
│ │ ├── programDialog.h
│ │ └── programDialog.ui
│ ├── resources/
│ │ ├── LICENSE_Apache2
│ │ ├── README.md
│ │ └── icons.qrc
│ ├── settingsDialog/
│ │ ├── settingsCommandDialog.cpp
│ │ ├── settingsCommandDialog.h
│ │ ├── settingsCommandDialog.ui
│ │ ├── settingsPathDialog.cpp
│ │ ├── settingsPathDialog.h
│ │ └── settingsPathDialog.ui
│ └── tree/
│ ├── tree.cpp
│ ├── tree.h
│ ├── treeElement.cpp
│ ├── treeElement.h
│ ├── treeElementAdd.cpp
│ ├── treeElementAdd.h
│ ├── treeElementBool.cpp
│ ├── treeElementBool.h
│ ├── treeElementChoice.cpp
│ ├── treeElementChoice.h
│ ├── treeElementComment.cpp
│ ├── treeElementComment.h
│ ├── treeElementComplex.cpp
│ ├── treeElementComplex.h
│ ├── treeElementFileName.cpp
│ ├── treeElementFileName.h
│ ├── treeElementGlobal.cpp
│ ├── treeElementGlobal.h
│ ├── treeElementLoopCondition.h
│ ├── treeElementProgram.cpp
│ ├── treeElementProgram.h
│ ├── treeElementSequence.cpp
│ ├── treeElementSequence.h
│ ├── treeElementSimple.cpp
│ ├── treeElementSimple.h
│ ├── treeElementTime.cpp
│ ├── treeElementTime.h
│ ├── treeElementUnknown.cpp
│ ├── treeElementUnknown.h
│ ├── treeItem.cpp
│ └── treeItem.h
└── source/
├── .gitignore
├── CMakeLists.txt
├── base/
│ ├── angle.h
│ ├── basisSplines.h
│ ├── constants.cpp
│ ├── constants.h
│ ├── doodson.cpp
│ ├── doodson.h
│ ├── ellipsoid.cpp
│ ├── ellipsoid.h
│ ├── equinoctial.cpp
│ ├── equinoctial.h
│ ├── exception.h
│ ├── format.cpp
│ ├── format.h
│ ├── fourier.cpp
│ ├── fourier.h
│ ├── gnssType.cpp
│ ├── gnssType.h
│ ├── griddedData.cpp
│ ├── griddedData.h
│ ├── import.h
│ ├── importStd.h
│ ├── kepler.cpp
│ ├── kepler.h
│ ├── legendreFunction.cpp
│ ├── legendreFunction.h
│ ├── legendrePolynomial.cpp
│ ├── legendrePolynomial.h
│ ├── matrix.cpp
│ ├── matrix.h
│ ├── parameterName.cpp
│ ├── parameterName.h
│ ├── planets.cpp
│ ├── planets.h
│ ├── polynomial.cpp
│ ├── polynomial.h
│ ├── portable.h
│ ├── rotary3d.cpp
│ ├── rotary3d.h
│ ├── sphericalHarmonics.cpp
│ ├── sphericalHarmonics.h
│ ├── string.cpp
│ ├── string.h
│ ├── tensor3d.h
│ ├── tideGeneratingPotential.h
│ ├── time.cpp
│ ├── time.h
│ ├── transform3d.cpp
│ ├── transform3d.h
│ ├── vector3d.h
│ ├── wavelets.cpp
│ └── wavelets.h
├── classes/
│ ├── border/
│ │ ├── border.cpp
│ │ ├── border.h
│ │ ├── borderCap.h
│ │ ├── borderGlobal.h
│ │ ├── borderPolygon.h
│ │ └── borderRectangle.h
│ ├── condition/
│ │ ├── condition.cpp
│ │ ├── condition.h
│ │ ├── conditionAnd.h
│ │ ├── conditionCommand.h
│ │ ├── conditionExpression.h
│ │ ├── conditionFileExist.h
│ │ ├── conditionMatrix.h
│ │ ├── conditionMatrixEmpty.h
│ │ ├── conditionNot.h
│ │ ├── conditionOr.h
│ │ ├── conditionStringContainsPattern.h
│ │ └── conditionStringMatchPattern.h
│ ├── digitalFilter/
│ │ ├── digitalFilter.cpp
│ │ ├── digitalFilter.h
│ │ ├── digitalFilterButterworth.cpp
│ │ ├── digitalFilterButterworth.h
│ │ ├── digitalFilterCorrelation.h
│ │ ├── digitalFilterDecorrelation.h
│ │ ├── digitalFilterDerivative.h
│ │ ├── digitalFilterFile.h
│ │ ├── digitalFilterGraceLowpass.h
│ │ ├── digitalFilterIntegral.h
│ │ ├── digitalFilterLag.h
│ │ ├── digitalFilterMedian.h
│ │ ├── digitalFilterMovingAverage.h
│ │ ├── digitalFilterNotch.cpp
│ │ ├── digitalFilterNotch.h
│ │ ├── digitalFilterReduceFilterOutput.h
│ │ └── digitalFilterWavelet.h
│ ├── earthRotation/
│ │ ├── earthRotation.cpp
│ │ ├── earthRotation.h
│ │ ├── earthRotationEra.h
│ │ ├── earthRotationFile.cpp
│ │ ├── earthRotationFile.h
│ │ ├── earthRotationGmst.h
│ │ ├── earthRotationIers1996.cpp
│ │ ├── earthRotationIers1996.h
│ │ ├── earthRotationIers2003.cpp
│ │ ├── earthRotationIers2003.h
│ │ ├── earthRotationIers2010.cpp
│ │ ├── earthRotationIers2010.h
│ │ ├── earthRotationIers2010b.cpp
│ │ ├── earthRotationIers2010b.h
│ │ ├── earthRotationStarCamera.cpp
│ │ ├── earthRotationStarCamera.h
│ │ ├── earthRotationZAxis.h
│ │ └── moonRotation.h
│ ├── eclipse/
│ │ ├── eclipse.cpp
│ │ ├── eclipse.h
│ │ ├── eclipseConical.h
│ │ └── eclipseSOLAARS.h
│ ├── ephemerides/
│ │ ├── ephemerides.cpp
│ │ ├── ephemerides.h
│ │ └── ephemeridesJpl.h
│ ├── forces/
│ │ ├── forces.cpp
│ │ └── forces.h
│ ├── gravityfield/
│ │ ├── gravityfield.cpp
│ │ ├── gravityfield.h
│ │ ├── gravityfieldEarthquakeOscillation.cpp
│ │ ├── gravityfieldEarthquakeOscillation.h
│ │ ├── gravityfieldFilter.cpp
│ │ ├── gravityfieldFilter.h
│ │ ├── gravityfieldFromParametrization.cpp
│ │ ├── gravityfieldFromParametrization.h
│ │ ├── gravityfieldGroup.cpp
│ │ ├── gravityfieldGroup.h
│ │ ├── gravityfieldInInterval.cpp
│ │ ├── gravityfieldInInterval.h
│ │ ├── gravityfieldOscillation.cpp
│ │ ├── gravityfieldOscillation.h
│ │ ├── gravityfieldPotentialCoefficients.cpp
│ │ ├── gravityfieldPotentialCoefficients.h
│ │ ├── gravityfieldPotentialCoefficientsInterior.cpp
│ │ ├── gravityfieldPotentialCoefficientsInterior.h
│ │ ├── gravityfieldTides.cpp
│ │ ├── gravityfieldTides.h
│ │ ├── gravityfieldTimeSplines.cpp
│ │ ├── gravityfieldTimeSplines.h
│ │ ├── gravityfieldTopography.cpp
│ │ ├── gravityfieldTopography.h
│ │ ├── gravityfieldTrend.cpp
│ │ └── gravityfieldTrend.h
│ ├── grid/
│ │ ├── grid.cpp
│ │ ├── grid.h
│ │ ├── gridCorput.h
│ │ ├── gridDriscoll.h
│ │ ├── gridFile.h
│ │ ├── gridGauss.h
│ │ ├── gridGeograph.h
│ │ ├── gridReuter.h
│ │ ├── gridSinglePoint.h
│ │ ├── gridSinglePointCartesian.h
│ │ ├── gridTriangleCenter.h
│ │ └── gridTriangleVertex.h
│ ├── instrumentType/
│ │ ├── instrumentType.cpp
│ │ └── instrumentType.h
│ ├── interpolatorTimeSeries/
│ │ ├── interpolatorTimeSeries.cpp
│ │ ├── interpolatorTimeSeries.h
│ │ ├── interpolatorTimeSeriesFillGapsLeastSquaresPolynomialFit.h
│ │ ├── interpolatorTimeSeriesLeastSquaresPolynomialFit.h
│ │ └── interpolatorTimeSeriesPolynomial.h
│ ├── kernel/
│ │ ├── kernel.cpp
│ │ ├── kernel.h
│ │ ├── kernelBlackmanLowPass.cpp
│ │ ├── kernelBlackmanLowPass.h
│ │ ├── kernelBottomPressure.cpp
│ │ ├── kernelBottomPressure.h
│ │ ├── kernelCoefficients.cpp
│ │ ├── kernelCoefficients.h
│ │ ├── kernelDeformation.cpp
│ │ ├── kernelDeformation.h
│ │ ├── kernelFilterGauss.cpp
│ │ ├── kernelFilterGauss.h
│ │ ├── kernelGeoid.cpp
│ │ ├── kernelGeoid.h
│ │ ├── kernelHotine.cpp
│ │ ├── kernelHotine.h
│ │ ├── kernelPoisson.cpp
│ │ ├── kernelPoisson.h
│ │ ├── kernelRadialGradient.cpp
│ │ ├── kernelRadialGradient.h
│ │ ├── kernelSelenoid.cpp
│ │ ├── kernelSelenoid.h
│ │ ├── kernelSingleLayer.cpp
│ │ ├── kernelSingleLayer.h
│ │ ├── kernelStokes.cpp
│ │ ├── kernelStokes.h
│ │ ├── kernelTruncation.cpp
│ │ ├── kernelTruncation.h
│ │ ├── kernelWaterHeight.cpp
│ │ └── kernelWaterHeight.h
│ ├── loop/
│ │ ├── loop.cpp
│ │ ├── loop.h
│ │ ├── loopCommandOutput.h
│ │ ├── loopDirectoryListing.h
│ │ ├── loopFileGnssStationInfo.h
│ │ ├── loopFileStringList.h
│ │ ├── loopFileStringTable.h
│ │ ├── loopFileTextLines.h
│ │ ├── loopLoop.h
│ │ ├── loopManualTable.h
│ │ ├── loopMatrix.h
│ │ ├── loopNumberSequence.h
│ │ ├── loopPlatformEquipment.h
│ │ ├── loopSortAndRemoveDuplicates.h
│ │ ├── loopStringList.h
│ │ ├── loopStringTable.h
│ │ ├── loopTimeIntervals.h
│ │ └── loopTimeSeries.h
│ ├── magnetosphere/
│ │ ├── magnetosphere.cpp
│ │ ├── magnetosphere.h
│ │ └── magnetosphereIgrf.h
│ ├── matrixGenerator/
│ │ ├── matrixGenerator.cpp
│ │ ├── matrixGenerator.h
│ │ ├── matrixGeneratorAppend.h
│ │ ├── matrixGeneratorCholesky.h
│ │ ├── matrixGeneratorDiagonal.h
│ │ ├── matrixGeneratorEigenValues.h
│ │ ├── matrixGeneratorElementManipulation.h
│ │ ├── matrixGeneratorElementWiseOperation.h
│ │ ├── matrixGeneratorExpression.h
│ │ ├── matrixGeneratorFile.h
│ │ ├── matrixGeneratorFromDiagonal.h
│ │ ├── matrixGeneratorInverse.h
│ │ ├── matrixGeneratorMultiplication.h
│ │ ├── matrixGeneratorNormalsFile.h
│ │ ├── matrixGeneratorRankKUpdate.h
│ │ ├── matrixGeneratorReorder.h
│ │ ├── matrixGeneratorReshape.h
│ │ ├── matrixGeneratorSetType.h
│ │ ├── matrixGeneratorShift.h
│ │ ├── matrixGeneratorSlice.h
│ │ ├── matrixGeneratorSort.h
│ │ └── matrixGeneratorTranspose.h
│ ├── miscAccelerations/
│ │ ├── miscAccelerations.cpp
│ │ ├── miscAccelerations.h
│ │ ├── miscAccelerationsAlbedo.cpp
│ │ ├── miscAccelerationsAlbedo.h
│ │ ├── miscAccelerationsAntennaThrust.h
│ │ ├── miscAccelerationsAtmosphericDrag.h
│ │ ├── miscAccelerationsAtmosphericDragFromDensityFile.h
│ │ ├── miscAccelerationsFromParametrization.h
│ │ ├── miscAccelerationsGroup.h
│ │ ├── miscAccelerationsRadiationPressure.h
│ │ ├── miscAccelerationsRelativisticEffect.h
│ │ └── miscAccelerationsSolarRadiationPressure.h
│ ├── noiseGenerator/
│ │ ├── noiseGenerator.cpp
│ │ ├── noiseGenerator.h
│ │ ├── noiseGeneratorDigitalFilter.h
│ │ ├── noiseGeneratorExpressionPSD.h
│ │ ├── noiseGeneratorPowerLaw.h
│ │ └── noiseGeneratorWhite.h
│ ├── normalEquation/
│ │ ├── normalEquation.cpp
│ │ ├── normalEquation.h
│ │ ├── normalEquationDesign.cpp
│ │ ├── normalEquationDesign.h
│ │ ├── normalEquationDesignVCE.cpp
│ │ ├── normalEquationDesignVCE.h
│ │ ├── normalEquationFile.cpp
│ │ ├── normalEquationFile.h
│ │ ├── normalEquationRegularization.cpp
│ │ ├── normalEquationRegularization.h
│ │ ├── normalEquationRegularizationGeneralized.cpp
│ │ └── normalEquationRegularizationGeneralized.h
│ ├── observation/
│ │ ├── observation.cpp
│ │ ├── observation.h
│ │ ├── observationDeflections.cpp
│ │ ├── observationDeflections.h
│ │ ├── observationDualSstVariational.cpp
│ │ ├── observationDualSstVariational.h
│ │ ├── observationGradiometer.cpp
│ │ ├── observationGradiometer.h
│ │ ├── observationPodAcceleration.cpp
│ │ ├── observationPodAcceleration.h
│ │ ├── observationPodEnergy.cpp
│ │ ├── observationPodEnergy.h
│ │ ├── observationPodIntegral.cpp
│ │ ├── observationPodIntegral.h
│ │ ├── observationPodVariational.cpp
│ │ ├── observationPodVariational.h
│ │ ├── observationSstIntegral.cpp
│ │ ├── observationSstIntegral.h
│ │ ├── observationSstVariational.cpp
│ │ ├── observationSstVariational.h
│ │ ├── observationStationLoading.cpp
│ │ ├── observationStationLoading.h
│ │ ├── observationTerrestrial.cpp
│ │ └── observationTerrestrial.h
│ ├── orbitPropagator/
│ │ ├── orbitPropagator.cpp
│ │ ├── orbitPropagator.h
│ │ ├── orbitPropagatorAdamsBashforthMoulton.h
│ │ ├── orbitPropagatorEuler.h
│ │ ├── orbitPropagatorFile.h
│ │ ├── orbitPropagatorGaussJackson.cpp
│ │ ├── orbitPropagatorGaussJackson.h
│ │ ├── orbitPropagatorPolynomial.h
│ │ ├── orbitPropagatorRungeKutta4.h
│ │ └── orbitPropagatorStoermerCowell.h
│ ├── parameterNames/
│ │ ├── parameterNames.cpp
│ │ ├── parameterNames.h
│ │ ├── parameterNamesAcceleration.h
│ │ ├── parameterNamesFile.h
│ │ ├── parameterNamesGnssAntenna.h
│ │ ├── parameterNamesGravity.h
│ │ ├── parameterNamesName.h
│ │ ├── parameterNamesObservation.h
│ │ ├── parameterNamesRename.h
│ │ ├── parameterNamesSatelliteTracking.h
│ │ ├── parameterNamesSelection.h
│ │ ├── parameterNamesTemporal.h
│ │ └── parameterNamesWithoutDuplicates.h
│ ├── parameterSelector/
│ │ ├── parameterSelector.cpp
│ │ ├── parameterSelector.h
│ │ ├── parameterSelectorComplement.h
│ │ ├── parameterSelectorGroup.h
│ │ ├── parameterSelectorMatrix.h
│ │ ├── parameterSelectorNames.h
│ │ ├── parameterSelectorRange.h
│ │ ├── parameterSelectorWildcard.h
│ │ └── parameterSelectorZeros.h
│ ├── parametrizationAcceleration/
│ │ ├── parametrizationAcceleration.cpp
│ │ ├── parametrizationAcceleration.h
│ │ ├── parametrizationAccelerationAccBias.h
│ │ ├── parametrizationAccelerationAccScaleFactors.h
│ │ ├── parametrizationAccelerationGnssSolarRadiation.h
│ │ ├── parametrizationAccelerationModelScale.h
│ │ ├── parametrizationAccelerationPerRevolution.h
│ │ └── parametrizationAccelerationThermosphericDensity.h
│ ├── parametrizationGnssAntenna/
│ │ ├── parametrizationGnssAntenna.cpp
│ │ ├── parametrizationGnssAntenna.h
│ │ ├── parametrizationGnssAntennaCenter.h
│ │ ├── parametrizationGnssAntennaRadialBasis.h
│ │ └── parametrizationGnssAntennaSphericalHarmonics.h
│ ├── parametrizationGravity/
│ │ ├── parametrizationGravity.cpp
│ │ ├── parametrizationGravity.h
│ │ ├── parametrizationGravityEarthquakeOscillation.cpp
│ │ ├── parametrizationGravityEarthquakeOscillation.h
│ │ ├── parametrizationGravityLinearTransformation.cpp
│ │ ├── parametrizationGravityLinearTransformation.h
│ │ ├── parametrizationGravityRadialBasis.cpp
│ │ ├── parametrizationGravityRadialBasis.h
│ │ ├── parametrizationGravitySphericalHarmonics.cpp
│ │ ├── parametrizationGravitySphericalHarmonics.h
│ │ ├── parametrizationGravityTemporal.cpp
│ │ └── parametrizationGravityTemporal.h
│ ├── parametrizationSatelliteTracking/
│ │ ├── parametrizationSatelliteTracking.cpp
│ │ ├── parametrizationSatelliteTracking.h
│ │ ├── parametrizationSatelliteTrackingAntennaCenter.h
│ │ ├── parametrizationSatelliteTrackingBias.h
│ │ ├── parametrizationSatelliteTrackingScale.h
│ │ ├── parametrizationSatelliteTrackingScaleModel.h
│ │ ├── parametrizationSatelliteTrackingSpecialEffect.h
│ │ └── parametrizationSatelliteTrackingTimeBias.h
│ ├── parametrizationTemporal/
│ │ ├── parametrizationTemporal.cpp
│ │ ├── parametrizationTemporal.h
│ │ ├── parametrizationTemporalConstant.cpp
│ │ ├── parametrizationTemporalConstant.h
│ │ ├── parametrizationTemporalDoodsonHarmonic.cpp
│ │ ├── parametrizationTemporalDoodsonHarmonic.h
│ │ ├── parametrizationTemporalFourier.cpp
│ │ ├── parametrizationTemporalFourier.h
│ │ ├── parametrizationTemporalOscillation.cpp
│ │ ├── parametrizationTemporalOscillation.h
│ │ ├── parametrizationTemporalPolynomial.cpp
│ │ ├── parametrizationTemporalPolynomial.h
│ │ ├── parametrizationTemporalSplines.cpp
│ │ ├── parametrizationTemporalSplines.h
│ │ └── parametrizationTemporalTrend.h
│ ├── platformSelector/
│ │ ├── platformSelector.cpp
│ │ ├── platformSelector.h
│ │ ├── platformSelectorAll.h
│ │ ├── platformSelectorEquipment.h
│ │ ├── platformSelectorExclude.h
│ │ ├── platformSelectorFile.h
│ │ └── platformSelectorWildcard.h
│ ├── sphericalHarmonicsFilter/
│ │ ├── sphericalHarmonicsFilter.cpp
│ │ ├── sphericalHarmonicsFilter.h
│ │ ├── sphericalHarmonicsFilterDdk.h
│ │ ├── sphericalHarmonicsFilterGauss.h
│ │ └── sphericalHarmonicsFilterMatrix.h
│ ├── sphericalHarmonicsNumbering/
│ │ ├── sphericalHarmonicsNumbering.cpp
│ │ ├── sphericalHarmonicsNumbering.h
│ │ ├── sphericalHarmonicsNumberingDegree.h
│ │ ├── sphericalHarmonicsNumberingFile.h
│ │ ├── sphericalHarmonicsNumberingOrder.h
│ │ └── sphericalHarmonicsNumberingOrderNonAlternating.h
│ ├── thermosphere/
│ │ ├── thermosphere.cpp
│ │ ├── thermosphere.h
│ │ ├── thermosphereJB2008.h
│ │ └── thermosphereNRLMSIS2.h
│ ├── tides/
│ │ ├── tides.cpp
│ │ ├── tides.h
│ │ ├── tidesAstronomical.cpp
│ │ ├── tidesAstronomical.h
│ │ ├── tidesCentrifugal.cpp
│ │ ├── tidesCentrifugal.h
│ │ ├── tidesDoodsonHarmonic.cpp
│ │ ├── tidesDoodsonHarmonic.h
│ │ ├── tidesEarth.cpp
│ │ ├── tidesEarth.h
│ │ ├── tidesGroup.cpp
│ │ ├── tidesGroup.h
│ │ ├── tidesOceanPole.cpp
│ │ ├── tidesOceanPole.h
│ │ ├── tidesPole.cpp
│ │ ├── tidesPole.h
│ │ ├── tidesSolidMoon.cpp
│ │ └── tidesSolidMoon.h
│ ├── timeSeries/
│ │ ├── timeSeries.cpp
│ │ ├── timeSeries.h
│ │ ├── timeSeriesConditional.h
│ │ ├── timeSeriesEveryMonth.h
│ │ ├── timeSeriesEveryYear.h
│ │ ├── timeSeriesExclude.h
│ │ ├── timeSeriesInstrument.h
│ │ ├── timeSeriesInstrumentArcIntervals.h
│ │ ├── timeSeriesInterpolate.h
│ │ ├── timeSeriesIrregular.h
│ │ ├── timeSeriesMonthly.h
│ │ ├── timeSeriesOrbitRevolutions.h
│ │ ├── timeSeriesUniformInterval.h
│ │ ├── timeSeriesUniformSampling.h
│ │ └── timeSeriesYearly.h
│ └── troposphere/
│ ├── troposphere.cpp
│ ├── troposphere.h
│ ├── troposphereGpt.cpp
│ ├── troposphereGpt.h
│ ├── troposphereMendesAndPavlis.h
│ ├── troposphereViennaMapping.cpp
│ └── troposphereViennaMapping.h
├── config/
│ ├── config.cpp
│ ├── config.h
│ ├── configRegister.h
│ ├── generateDocumentation.cpp
│ └── generateDocumentation.h
├── external/
│ ├── README.md
│ ├── compress.h
│ ├── fortran.h
│ ├── hwm/
│ │ ├── README.txt
│ │ ├── hwm.h
│ │ └── hwm14.f90
│ ├── iers/
│ │ ├── CNMTX.F
│ │ ├── FUNDARG.f
│ │ ├── ORTHO_EOP.F
│ │ ├── PMSDNUT2.F
│ │ ├── UTLIBR.F
│ │ ├── iers.h
│ │ ├── pmsdnut.f
│ │ └── ray.f
│ ├── igrf/
│ │ ├── igrf.h
│ │ └── igrf14.f
│ ├── jb2008/
│ │ ├── JB2008.f
│ │ ├── README.txt
│ │ └── jb2008.h
│ ├── lapack/
│ │ ├── blas.h
│ │ ├── blasWrapper.f
│ │ ├── lapack.h
│ │ └── lapackWrapper.f
│ ├── nrlmsis2/
│ │ ├── alt2gph.F90
│ │ ├── msis_calc.F90
│ │ ├── msis_constants.F90
│ │ ├── msis_dfn.F90
│ │ ├── msis_gfn.F90
│ │ ├── msis_init.F90
│ │ ├── msis_tfn.F90
│ │ ├── nrlmsis2.h
│ │ ├── nrlmsis2Wrapper.F90
│ │ └── readme.txt
│ └── sgp4/
│ ├── LICENSE
│ ├── README.txt
│ ├── SGP4.cpp
│ └── SGP4.h
├── files/
│ ├── fileAdmittance.cpp
│ ├── fileAdmittance.h
│ ├── fileArcList.cpp
│ ├── fileArcList.h
│ ├── fileDoodsonEarthOrientationParameter.cpp
│ ├── fileDoodsonEarthOrientationParameter.h
│ ├── fileDoodsonHarmonic.cpp
│ ├── fileDoodsonHarmonic.h
│ ├── fileEarthOrientationParameter.cpp
│ ├── fileEarthOrientationParameter.h
│ ├── fileEarthTide.cpp
│ ├── fileEarthTide.h
│ ├── fileEphemerides.cpp
│ ├── fileEphemerides.h
│ ├── fileFormatRegister.h
│ ├── fileGnssAntennaDefinition.cpp
│ ├── fileGnssAntennaDefinition.h
│ ├── fileGnssReceiverDefinition.cpp
│ ├── fileGnssReceiverDefinition.h
│ ├── fileGnssSignalBias.cpp
│ ├── fileGnssSignalBias.h
│ ├── fileGriddedData.cpp
│ ├── fileGriddedData.h
│ ├── fileGriddedDataTimeSeries.cpp
│ ├── fileGriddedDataTimeSeries.h
│ ├── fileInstrument.cpp
│ ├── fileInstrument.h
│ ├── fileMatrix.cpp
│ ├── fileMatrix.h
│ ├── fileMeanPolarMotion.cpp
│ ├── fileMeanPolarMotion.h
│ ├── fileNormalEquation.cpp
│ ├── fileNormalEquation.h
│ ├── fileOceanPoleTide.cpp
│ ├── fileOceanPoleTide.h
│ ├── fileParameterName.cpp
│ ├── fileParameterName.h
│ ├── filePlatform.cpp
│ ├── filePlatform.h
│ ├── filePolygon.cpp
│ ├── filePolygon.h
│ ├── fileSatelliteModel.cpp
│ ├── fileSatelliteModel.h
│ ├── fileSphericalHarmonics.cpp
│ ├── fileSphericalHarmonics.h
│ ├── fileStringTable.cpp
│ ├── fileStringTable.h
│ ├── fileTideGeneratingPotential.cpp
│ ├── fileTideGeneratingPotential.h
│ ├── fileTimeSplinesGravityfield.cpp
│ ├── fileTimeSplinesGravityfield.h
│ ├── fileVariationalEquation.cpp
│ └── fileVariationalEquation.h
├── gnss/
│ ├── gnss.cpp
│ ├── gnss.h
│ ├── gnssDesignMatrix.cpp
│ ├── gnssDesignMatrix.h
│ ├── gnssLambda.cpp
│ ├── gnssLambda.h
│ ├── gnssNormalEquationInfo.cpp
│ ├── gnssNormalEquationInfo.h
│ ├── gnssObservation.cpp
│ ├── gnssObservation.h
│ ├── gnssParametrization/
│ │ ├── gnssParametrization.cpp
│ │ ├── gnssParametrization.h
│ │ ├── gnssParametrizationAmbiguities.cpp
│ │ ├── gnssParametrizationAmbiguities.h
│ │ ├── gnssParametrizationClocks.cpp
│ │ ├── gnssParametrizationClocks.h
│ │ ├── gnssParametrizationClocksModel.cpp
│ │ ├── gnssParametrizationClocksModel.h
│ │ ├── gnssParametrizationCodeBiases.cpp
│ │ ├── gnssParametrizationCodeBiases.h
│ │ ├── gnssParametrizationConstraints.h
│ │ ├── gnssParametrizationEarthRotation.cpp
│ │ ├── gnssParametrizationEarthRotation.h
│ │ ├── gnssParametrizationGroup.h
│ │ ├── gnssParametrizationIonosphereMap.cpp
│ │ ├── gnssParametrizationIonosphereMap.h
│ │ ├── gnssParametrizationIonosphereSTEC.cpp
│ │ ├── gnssParametrizationIonosphereSTEC.h
│ │ ├── gnssParametrizationIonosphereVTEC.cpp
│ │ ├── gnssParametrizationIonosphereVTEC.h
│ │ ├── gnssParametrizationKinematicPositions.cpp
│ │ ├── gnssParametrizationKinematicPositions.h
│ │ ├── gnssParametrizationLeoDynamicOrbits.cpp
│ │ ├── gnssParametrizationLeoDynamicOrbits.h
│ │ ├── gnssParametrizationReceiverAntennas.cpp
│ │ ├── gnssParametrizationReceiverAntennas.h
│ │ ├── gnssParametrizationSignalBiases.cpp
│ │ ├── gnssParametrizationSignalBiases.h
│ │ ├── gnssParametrizationStaticPositions.cpp
│ │ ├── gnssParametrizationStaticPositions.h
│ │ ├── gnssParametrizationTecBiases.cpp
│ │ ├── gnssParametrizationTecBiases.h
│ │ ├── gnssParametrizationTemporalBias.cpp
│ │ ├── gnssParametrizationTemporalBias.h
│ │ ├── gnssParametrizationTransmitterAntennas.cpp
│ │ ├── gnssParametrizationTransmitterAntennas.h
│ │ ├── gnssParametrizationTransmitterDynamicOrbits.cpp
│ │ ├── gnssParametrizationTransmitterDynamicOrbits.h
│ │ ├── gnssParametrizationTroposphere.cpp
│ │ └── gnssParametrizationTroposphere.h
│ ├── gnssProcessingStep/
│ │ ├── gnssProcessingStep.cpp
│ │ ├── gnssProcessingStep.h
│ │ ├── gnssProcessingStepComputeCovarianceMatrix.h
│ │ ├── gnssProcessingStepDisableTransmitterShadowEpochs.h
│ │ ├── gnssProcessingStepEstimate.h
│ │ ├── gnssProcessingStepForEachReceiverSeparately.h
│ │ ├── gnssProcessingStepGroup.h
│ │ ├── gnssProcessingStepPrintResidualStatistics.h
│ │ ├── gnssProcessingStepResolveAmbiguities.h
│ │ ├── gnssProcessingStepSelectEpochs.h
│ │ ├── gnssProcessingStepSelectNormalsBlockStructure.h
│ │ ├── gnssProcessingStepSelectParametrizations.h
│ │ ├── gnssProcessingStepSelectReceivers.h
│ │ ├── gnssProcessingStepWriteAprioriSolution.h
│ │ ├── gnssProcessingStepWriteNormalEquations.h
│ │ ├── gnssProcessingStepWriteResiduals.h
│ │ ├── gnssProcessingStepWriteResults.h
│ │ ├── gnssProcessingStepWriteUsedStationList.h
│ │ └── gnssProcessingStepWriteUsedTransmitterList.h
│ ├── gnssReceiver.cpp
│ ├── gnssReceiver.h
│ ├── gnssReceiverGenerator/
│ │ ├── gnssReceiverGenerator.cpp
│ │ ├── gnssReceiverGenerator.h
│ │ ├── gnssReceiverGeneratorLowEarthOrbiter.cpp
│ │ ├── gnssReceiverGeneratorLowEarthOrbiter.h
│ │ ├── gnssReceiverGeneratorStationNetwork.cpp
│ │ └── gnssReceiverGeneratorStationNetwork.h
│ ├── gnssTransceiver.h
│ ├── gnssTransmitter.h
│ └── gnssTransmitterGenerator/
│ ├── gnssTransmitterGenerator.cpp
│ ├── gnssTransmitterGenerator.h
│ ├── gnssTransmitterGeneratorGnss.cpp
│ └── gnssTransmitterGeneratorGnss.h
├── groops.cpp
├── inputOutput/
│ ├── archive.cpp
│ ├── archive.h
│ ├── archiveAscii.cpp
│ ├── archiveAscii.h
│ ├── archiveBinary.cpp
│ ├── archiveBinary.h
│ ├── archiveJson.cpp
│ ├── archiveJson.h
│ ├── archiveXml.cpp
│ ├── archiveXml.h
│ ├── file.cpp
│ ├── file.h
│ ├── fileArchive.cpp
│ ├── fileArchive.h
│ ├── fileName.cpp
│ ├── fileName.h
│ ├── fileNetCdf.cpp
│ ├── fileNetCdf.h
│ ├── fileSinex.cpp
│ ├── fileSinex.h
│ ├── logging.cpp
│ ├── logging.h
│ ├── settings.cpp
│ ├── settings.h
│ ├── system.cpp
│ └── system.h
├── misc/
│ ├── grace/
│ │ ├── graceKBandGeometry.cpp
│ │ └── graceKBandGeometry.h
│ ├── kalmanProcessing.cpp
│ ├── kalmanProcessing.h
│ ├── miscGriddedData.cpp
│ ├── miscGriddedData.h
│ ├── normalsShortTimeStaticLongTime.cpp
│ ├── normalsShortTimeStaticLongTime.h
│ ├── observation/
│ │ ├── covariancePod.cpp
│ │ ├── covariancePod.h
│ │ ├── covarianceSst.cpp
│ │ ├── covarianceSst.h
│ │ ├── integralEquation.cpp
│ │ ├── integralEquation.h
│ │ ├── observationMisc.cpp
│ │ ├── observationMisc.h
│ │ ├── observationMiscDualSstVariational.cpp
│ │ ├── observationMiscDualSstVariational.h
│ │ ├── observationMiscPod.cpp
│ │ ├── observationMiscPod.h
│ │ ├── observationMiscPodIntegral.cpp
│ │ ├── observationMiscPodIntegral.h
│ │ ├── observationMiscPodVariational.cpp
│ │ ├── observationMiscPodVariational.h
│ │ ├── observationMiscSst.cpp
│ │ ├── observationMiscSst.h
│ │ ├── observationMiscSstIntegral.cpp
│ │ ├── observationMiscSstIntegral.h
│ │ ├── observationMiscSstVariational.cpp
│ │ ├── observationMiscSstVariational.h
│ │ ├── variationalEquation.cpp
│ │ ├── variationalEquation.h
│ │ ├── variationalEquationFromFile.cpp
│ │ └── variationalEquationFromFile.h
│ ├── varianceComponentEstimation.cpp
│ └── varianceComponentEstimation.h
├── parallel/
│ ├── matrixDistributed.cpp
│ ├── matrixDistributed.h
│ ├── parallel.h
│ ├── parallelCluster.cpp
│ └── parallelSingle.cpp
├── parser/
│ ├── dataVariables.cpp
│ ├── dataVariables.h
│ ├── expressionParser.cpp
│ ├── expressionParser.h
│ ├── stringParser.cpp
│ ├── stringParser.h
│ ├── xml.cpp
│ └── xml.h
├── plot/
│ ├── plotAxis.cpp
│ ├── plotAxis.h
│ ├── plotColorbar.cpp
│ ├── plotColorbar.h
│ ├── plotGraphLayer.cpp
│ ├── plotGraphLayer.h
│ ├── plotLegend.cpp
│ ├── plotLegend.h
│ ├── plotMapLayer.cpp
│ ├── plotMapLayer.h
│ ├── plotMapProjection.cpp
│ ├── plotMapProjection.h
│ ├── plotMisc.cpp
│ └── plotMisc.h
├── programs/
│ ├── conversion/
│ │ ├── berneseKinematic2Orbit.cpp
│ │ ├── doodsonHarmonics/
│ │ │ ├── doodsonAdmittance2SupplementaryFiles.cpp
│ │ │ ├── doodsonHarmonics2IersPotential.cpp
│ │ │ ├── doodsonHarmonics2IersWaterHeight.cpp
│ │ │ ├── graceAod2DoodsonHarmonics.cpp
│ │ │ ├── iersPotential2DoodsonHarmonics.cpp
│ │ │ └── iersWaterHeight2DoodsonHarmonics.cpp
│ │ ├── eop/
│ │ │ ├── iersC04IAU2000EarthOrientationParameter.cpp
│ │ │ ├── iersHighFrequentEop2DoodsonEop.cpp
│ │ │ ├── iersRapidIAU2000EarthOrientationParameter.cpp
│ │ │ └── igs2EarthOrientationParameter.cpp
│ │ ├── gnss/
│ │ │ ├── gnssAntex2AntennaDefinition.cpp
│ │ │ ├── gnssClock2ClockRinex.cpp
│ │ │ ├── gnssClockRinex2InstrumentClock.cpp
│ │ │ ├── gnssEop2IgsErp.cpp
│ │ │ ├── gnssGriddedDataTimeSeries2Ionex.cpp
│ │ │ ├── gnssIonex2GriddedDataTimeSeries.cpp
│ │ │ ├── gnssNormals2Sinex.cpp
│ │ │ ├── gnssOrbex2StarCamera.cpp
│ │ │ ├── gnssReceiver2RinexObservation.cpp
│ │ │ ├── gnssRinexNavigation2OrbitClock.cpp
│ │ │ ├── gnssSignalBias2SinexBias.cpp
│ │ │ ├── gnssSinexBias2SignalBias.cpp
│ │ │ ├── gnssStationLog2Platform.cpp
│ │ │ ├── gnssTroposphere2TropoSinex.cpp
│ │ │ ├── rinexObservation2GnssReceiver.cpp
│ │ │ ├── sinex2StationDiscontinuities.cpp
│ │ │ ├── sinex2StationPositions.cpp
│ │ │ ├── sinexMetadata2GlonassFrequencyNumber.cpp
│ │ │ └── sinexMetadata2SatelliteModel.cpp
│ │ ├── goce/
│ │ │ ├── goceXml2Gradiometer.cpp
│ │ │ ├── goceXml2Orbit.cpp
│ │ │ ├── goceXml2StarCamera.cpp
│ │ │ └── goceXmlEggNom1b.cpp
│ │ ├── grace/
│ │ │ ├── accelerometer2GraceL1b.cpp
│ │ │ ├── fileGrace.cpp
│ │ │ ├── fileGrace.h
│ │ │ ├── grace2PotentialCoefficients.cpp
│ │ │ ├── graceAccelerometer2L1bAscii.cpp
│ │ │ ├── graceCoefficients2BlockMeanTimeSplines.cpp
│ │ │ ├── graceL1a2Accelerometer.cpp
│ │ │ ├── graceL1a2SatelliteTracking.cpp
│ │ │ ├── graceL1a2StarCamera.cpp
│ │ │ ├── graceL1a2Temperature.cpp
│ │ │ ├── graceL1b2AccHousekeeping.cpp
│ │ │ ├── graceL1b2Accelerometer.cpp
│ │ │ ├── graceL1b2ClockOffset.cpp
│ │ │ ├── graceL1b2GnssReceiver.cpp
│ │ │ ├── graceL1b2Magnetometer.cpp
│ │ │ ├── graceL1b2Mass.cpp
│ │ │ ├── graceL1b2Orbit.cpp
│ │ │ ├── graceL1b2SatelliteTracking.cpp
│ │ │ ├── graceL1b2StarCamera.cpp
│ │ │ ├── graceL1b2StarCameraCovariance.cpp
│ │ │ ├── graceL1b2SteeringMirror.cpp
│ │ │ ├── graceL1b2Thruster.cpp
│ │ │ ├── graceL1b2TimeOffset.cpp
│ │ │ ├── graceL1b2Uso.cpp
│ │ │ ├── graceL1b2Vector.cpp
│ │ │ ├── graceSequenceOfEvents.cpp
│ │ │ ├── orbit2GraceL1b.cpp
│ │ │ ├── satelliteTracking2GraceL1b.cpp
│ │ │ └── starCamera2GraceL1b.cpp
│ │ ├── graceAod2TimeSplines.cpp
│ │ ├── grail/
│ │ │ ├── grailCdr2Orbit.cpp
│ │ │ ├── grailCdr2SatelliteTracking.cpp
│ │ │ └── grailCdr2StarCamera.cpp
│ │ ├── grid/
│ │ │ ├── griddedData2NetCdf.cpp
│ │ │ ├── griddedDataTimeSeries2NetCdf.cpp
│ │ │ ├── netCdf2GriddedData.cpp
│ │ │ ├── netCdf2GriddedDataTimeSeries.cpp
│ │ │ ├── netCdfInfo.cpp
│ │ │ └── oceanTidesDTU2GriddedData.cpp
│ │ ├── groopsAscii2Orbit.cpp
│ │ ├── hw2TideGeneratingPotential.cpp
│ │ ├── icgem2PotentialCoefficients.cpp
│ │ ├── iers2OceanPoleTide.cpp
│ │ ├── jplAscii2Ephemerides.cpp
│ │ ├── normalsSphericalHarmonics2Sinex.cpp
│ │ ├── orbit2GroopsAscii.cpp
│ │ ├── orbit2Sp3Format.cpp
│ │ ├── potentialCoefficients2Icgem.cpp
│ │ ├── psmslOceanBottomPressure2TimeSeries.cpp
│ │ ├── satellites/
│ │ │ ├── champ2AccStar.cpp
│ │ │ ├── champ2Orbit.cpp
│ │ │ ├── cosmic2OrbitStar.cpp
│ │ │ ├── jason2Starcamera.cpp
│ │ │ ├── sacc2Orbit.cpp
│ │ │ ├── sentinel2Orbit.cpp
│ │ │ ├── sentinel2StarCamera.cpp
│ │ │ ├── swarm2Starcamera.cpp
│ │ │ ├── terraSarTandem2Orbit.cpp
│ │ │ └── terraSarTandem2StarCamera.cpp
│ │ ├── sinex2Normals.cpp
│ │ ├── slr/
│ │ │ ├── cpf2Orbit.cpp
│ │ │ ├── crd2NormalPoints.cpp
│ │ │ ├── cstg2NormalPoints.cpp
│ │ │ ├── merit2FullRate.cpp
│ │ │ ├── merit2NormalPoints.cpp
│ │ │ ├── orbit2Cpf.cpp
│ │ │ ├── sinexEccentricties2SlrPlatform.cpp
│ │ │ ├── slrComModel2RangeBiasStationSatellite.cpp
│ │ │ └── slrSinexDataHandling2Files.cpp
│ │ ├── sp3Format2Orbit.cpp
│ │ ├── starCamera2Orbex.cpp
│ │ ├── tle2Orbit.cpp
│ │ ├── viennaMappingFunctionGrid2File.cpp
│ │ └── viennaMappingFunctionStation2File.cpp
│ ├── covariance/
│ │ ├── autoregressiveModel2CovarianceMatrix.cpp
│ │ ├── covarianceFunction2DigitalFilter.cpp
│ │ ├── covarianceFunction2PowerSpectralDensity.cpp
│ │ ├── covarianceMatrix2AutoregressiveModel.cpp
│ │ ├── covarianceMatrix2Correlation.cpp
│ │ └── powerSpectralDensity2CovarianceFunction.cpp
│ ├── deprecated/
│ │ ├── gnssAttitude2Orbex.cpp
│ │ ├── gnssPrn2SvnBlockVariables.cpp
│ │ ├── gnssStationInfoCreate.cpp
│ │ ├── gridRectangular2NetCdf.cpp
│ │ ├── netCdf2GridRectangular.cpp
│ │ ├── sinex2StationPosition.cpp
│ │ └── sinex2StationPostSeismicDeformation.cpp
│ ├── doodsonHarmonics/
│ │ ├── doodsonAdmittanceInterpolation.cpp
│ │ ├── doodsonAdmittanceTimeSeries.cpp
│ │ ├── doodsonArguments2TimeSeries.cpp
│ │ ├── doodsonHarmonics2GriddedAmplitudeAndPhase.cpp
│ │ ├── doodsonHarmonics2PotentialCoefficients.cpp
│ │ ├── doodsonHarmonicsCalculateAdmittance.cpp
│ │ ├── doodsonHarmonicsChangePartialTides.cpp
│ │ ├── modelEquilibriumTide.cpp
│ │ └── potentialCoefficients2DoodsonHarmonics.cpp
│ ├── gnss/
│ │ ├── gnssAntennaDefinition2ParameterVector.cpp
│ │ ├── gnssAntennaDefinition2Skyplot.cpp
│ │ ├── gnssAntennaDefinitionCreate.cpp
│ │ ├── gnssAntennaNormalsConstraint.cpp
│ │ ├── gnssAttitudeInfoCreate.cpp
│ │ ├── gnssBiasClockAlignment.cpp
│ │ ├── gnssEstimateClockShift.cpp
│ │ ├── gnssGlonassFrequencyNumberUpdate.cpp
│ │ ├── gnssProcessing.cpp
│ │ ├── gnssReceiverDefinitionCreate.cpp
│ │ ├── gnssResiduals2AccuracyDefinition.cpp
│ │ ├── gnssResiduals2Skyplot.cpp
│ │ ├── gnssResiduals2TransmitterAccuracyDefinition.cpp
│ │ ├── gnssSignalBias2Matrix.cpp
│ │ ├── gnssSimulateReceiver.cpp
│ │ ├── instrumentGnssReceiver2TimeSeries.cpp
│ │ └── parameterVector2GnssAntennaDefinition.cpp
│ ├── grace/
│ │ ├── ensembleAveragingScaleModel.cpp
│ │ ├── graceAntennaCenterCorrectionArcCovariance.cpp
│ │ ├── graceOrbit2TransplantTimeOffset.cpp
│ │ ├── graceSstResidualAnalysis.cpp
│ │ ├── graceSstScaleModel.cpp
│ │ ├── graceSstSpecialEvents.cpp
│ │ ├── graceThrusterResponse2Accelerometer.cpp
│ │ ├── instrumentSatelliteTrackingAntennaCenterCorrection.cpp
│ │ └── instrumentStarCameraAngularAccelerometerFusion.cpp
│ ├── gravityfield/
│ │ ├── gravityfield2AbsoluteGravity.cpp
│ │ ├── gravityfield2AreaMeanTimeSeries.cpp
│ │ ├── gravityfield2Deflections.cpp
│ │ ├── gravityfield2DegreeAmplitudes.cpp
│ │ ├── gravityfield2DegreeAmplitudesPlotGrid.cpp
│ │ ├── gravityfield2DisplacementTimeSeries.cpp
│ │ ├── gravityfield2EmpiricalCovariance.cpp
│ │ ├── gravityfield2Gradients.cpp
│ │ ├── gravityfield2GravityVector.cpp
│ │ ├── gravityfield2GridCovarianceMatrix.cpp
│ │ ├── gravityfield2GriddedData.cpp
│ │ ├── gravityfield2GriddedDataTimeSeries.cpp
│ │ ├── gravityfield2PotentialCoefficients.cpp
│ │ ├── gravityfield2PotentialCoefficientsTimeSeries.cpp
│ │ ├── gravityfield2SphericalHarmonicsVector.cpp
│ │ ├── gravityfield2TimeSplines.cpp
│ │ ├── gravityfield2TrendPotentialCoefficients.cpp
│ │ ├── gravityfieldCovariancesPropagation2GriddedData.cpp
│ │ ├── gravityfieldReplacePotentialCoefficients.cpp
│ │ └── gravityfieldVariancesPropagation2GriddedData.cpp
│ ├── griddedData/
│ │ ├── griddedData2AreaMeanTimeSeries.cpp
│ │ ├── griddedData2GriddedDataStatistics.cpp
│ │ ├── griddedData2GriddedDataTimeSeries.cpp
│ │ ├── griddedData2Matrix.cpp
│ │ ├── griddedData2PotentialCoefficients.cpp
│ │ ├── griddedData2SphericalDistance.cpp
│ │ ├── griddedData2TimeSeries.cpp
│ │ ├── griddedDataCalculate.cpp
│ │ ├── griddedDataConcatenate.cpp
│ │ ├── griddedDataCreate.cpp
│ │ ├── griddedDataInterpolate.cpp
│ │ ├── griddedDataReduceSampling.cpp
│ │ ├── griddedDataTimeSeries2GriddedData.cpp
│ │ ├── griddedDataTimeSeries2PotentialCoefficients.cpp
│ │ ├── griddedTopography2AtmospherePotentialCoefficients.cpp
│ │ ├── griddedTopography2PotentialCoefficients.cpp
│ │ ├── griddedTopographyEllipsoidal2Radial.cpp
│ │ ├── matrix2GriddedData.cpp
│ │ ├── matrixRectangular2GriddedData.cpp
│ │ └── timeSeries2GriddedData.cpp
│ ├── instruments/
│ │ ├── instrument2AllanVariance.cpp
│ │ ├── instrument2CovarianceFunctionVCE.cpp
│ │ ├── instrument2CrossCorrelationFunction.cpp
│ │ ├── instrument2Histogram.cpp
│ │ ├── instrument2PowerSpectralDensity.cpp
│ │ ├── instrument2RmsPlotGrid.cpp
│ │ ├── instrument2Scaleogram.cpp
│ │ ├── instrument2SpectralCoherence.cpp
│ │ ├── instrument2Spectrogram.cpp
│ │ ├── instrumentAccelerometer2ThermosphericDensity.cpp
│ │ ├── instrumentAccelerometerApplyEstimatedParameters.cpp
│ │ ├── instrumentAccelerometerEstimateBiasScale.cpp
│ │ ├── instrumentAccelerometerEstimateParameters.cpp
│ │ ├── instrumentApplyTimeOffset.cpp
│ │ ├── instrumentArcCalculate.cpp
│ │ ├── instrumentArcCrossStatistics.cpp
│ │ ├── instrumentArcStatistics.cpp
│ │ ├── instrumentConcatenate.cpp
│ │ ├── instrumentCovarianceCheck.cpp
│ │ ├── instrumentDetrend.cpp
│ │ ├── instrumentEarthRotation.cpp
│ │ ├── instrumentEstimateEmpiricalCovariance.cpp
│ │ ├── instrumentEstimateHelmertTransformation.cpp
│ │ ├── instrumentFilter.cpp
│ │ ├── instrumentInsertNAN.cpp
│ │ ├── instrumentMultiplyAdd.cpp
│ │ ├── instrumentReduceSampling.cpp
│ │ ├── instrumentRemoveEpochsByCriteria.cpp
│ │ ├── instrumentRemoveEpochsByTimes.cpp
│ │ ├── instrumentRemoveEpochsThruster.cpp
│ │ ├── instrumentResample.cpp
│ │ ├── instrumentRotate.cpp
│ │ ├── instrumentSetType.cpp
│ │ ├── instrumentStarCamera2AccAngularRate.cpp
│ │ ├── instrumentStarCamera2RollPitchYaw.cpp
│ │ ├── instrumentStarCamera2RotaryMatrix.cpp
│ │ ├── instrumentStarCameraMultiply.cpp
│ │ ├── instrumentStatisticsTimeSeries.cpp
│ │ ├── instrumentSynchronize.cpp
│ │ ├── instrumentWaveletDecomposition.cpp
│ │ └── localLevelFrame2StarCamera.cpp
│ ├── kalmanFilter/
│ │ ├── kalmanBuildNormals.cpp
│ │ ├── kalmanFilter.cpp
│ │ ├── kalmanSmoother.cpp
│ │ └── kalmanSmootherLeastSquares.cpp
│ ├── misc/
│ │ ├── digitalFilter2FrequencyResponse.cpp
│ │ ├── digitalFilter2ImpulseResponse.cpp
│ │ ├── earthOrientationParameterTimeSeries.cpp
│ │ ├── earthRotaryVectorTimeSeries.cpp
│ │ ├── eclipseFactor2GriddedData.cpp
│ │ ├── filterMatrixWindowedPotentialCoefficients.cpp
│ │ ├── functionsCalculate.cpp
│ │ ├── grs2PotentialCoefficients.cpp
│ │ ├── kaula2SigmaPotentialCoefficients.cpp
│ │ ├── kernel2Coefficients.cpp
│ │ ├── kernel2SigmaPotentialCoefficients.cpp
│ │ ├── kernelEvaluate.cpp
│ │ ├── magneticField2GriddedData.cpp
│ │ ├── matrixCalculate.cpp
│ │ ├── observationEquations2Files.cpp
│ │ ├── platformCreate.cpp
│ │ ├── potentialCoefficients2BlockMeanTimeSplines.cpp
│ │ ├── potentialCoefficients2DegreeAmplitudes.cpp
│ │ ├── radialBasisSplines2KernelCoefficients.cpp
│ │ ├── satelliteModelCreate.cpp
│ │ ├── synthesisSphericalHarmonicsMatrix.cpp
│ │ ├── temporalRepresentation2TimeSeries.cpp
│ │ ├── thermosphericState2GriddedData.cpp
│ │ ├── timeSeries2PotentialCoefficients.cpp
│ │ ├── timeSeriesCreate.cpp
│ │ └── variational2OrbitAndStarCamera.cpp
│ ├── normals/
│ │ ├── normalsAccumulate.cpp
│ │ ├── normalsBuild.cpp
│ │ ├── normalsBuildShortTimeStaticLongTime.cpp
│ │ ├── normalsCreate.cpp
│ │ ├── normalsEliminate.cpp
│ │ ├── normalsMultiplyAdd.cpp
│ │ ├── normalsRegularizationBorders.cpp
│ │ ├── normalsRegularizationSphericalHarmonics.cpp
│ │ ├── normalsReorder.cpp
│ │ ├── normalsReorderAndAccumulate.cpp
│ │ ├── normalsScale.cpp
│ │ ├── normalsSolverVCE.cpp
│ │ ├── normalsTemporalCombination.cpp
│ │ ├── parameterNamesCreate.cpp
│ │ └── parameterSelection2IndexVector.cpp
│ ├── orbit/
│ │ ├── orbit2ArgumentOfLatitude.cpp
│ │ ├── orbit2BetaPrimeAngle.cpp
│ │ ├── orbit2EarthFixedOrbit.cpp
│ │ ├── orbit2EclipseFactor.cpp
│ │ ├── orbit2Groundtracks.cpp
│ │ ├── orbit2Kepler.cpp
│ │ ├── orbit2MagneticField.cpp
│ │ ├── orbit2ThermosphericState.cpp
│ │ ├── orbitAddVelocityAndAcceleration.cpp
│ │ └── planetOrbit.cpp
│ ├── plot/
│ │ ├── plotDegreeAmplitudes.cpp
│ │ ├── plotGraph.cpp
│ │ ├── plotMap.cpp
│ │ ├── plotMatrix.cpp
│ │ └── plotSphericalHarmonicsTriangle.cpp
│ ├── preprocessing/
│ │ ├── preprocessingDualSst.cpp
│ │ ├── preprocessingGradiometer.cpp
│ │ ├── preprocessingPod.cpp
│ │ ├── preprocessingSst.cpp
│ │ ├── preprocessingVariationalEquation.cpp
│ │ ├── preprocessingVariationalEquationOrbitFit.cpp
│ │ └── preprocessingVariationalEquationSstFit.cpp
│ ├── program.cpp
│ ├── program.h
│ ├── programTemplate.cpp
│ ├── simulation/
│ │ ├── noiseAccelerometer.cpp
│ │ ├── noiseGriddedData.cpp
│ │ ├── noiseInstrument.cpp
│ │ ├── noiseNormalsSolution.cpp
│ │ ├── noiseOrbit.cpp
│ │ ├── noiseSatelliteTracking.cpp
│ │ ├── noiseStarCamera.cpp
│ │ ├── noiseTimeSeries.cpp
│ │ ├── simulateAccelerometer.cpp
│ │ ├── simulateAccelerometerCoMOffset.cpp
│ │ ├── simulateGradiometer.cpp
│ │ ├── simulateKeplerOrbit.cpp
│ │ ├── simulateOrbit.cpp
│ │ ├── simulateSatelliteTracking.cpp
│ │ ├── simulateStarCamera.cpp
│ │ ├── simulateStarCameraGnss.cpp
│ │ ├── simulateStarCameraGrace.cpp
│ │ ├── simulateStarCameraSentinel1.cpp
│ │ └── simulateStarCameraTerrasar.cpp
│ ├── slr/
│ │ └── slrProcessing.cpp
│ └── system/
│ ├── fileConvert.cpp
│ ├── fileCreateDirectories.cpp
│ ├── fileMove.cpp
│ ├── fileRemove.cpp
│ ├── fileTextCreate.cpp
│ ├── groupPrograms.cpp
│ ├── ifPrograms.cpp
│ ├── loopPrograms.cpp
│ └── runCommand.cpp
├── slr/
│ ├── slr.cpp
│ ├── slr.h
│ ├── slrDesignMatrix.cpp
│ ├── slrDesignMatrix.h
│ ├── slrNormalEquationInfo.cpp
│ ├── slrNormalEquationInfo.h
│ ├── slrObservation.cpp
│ ├── slrObservation.h
│ ├── slrParametrization/
│ │ ├── slrParametrization.cpp
│ │ ├── slrParametrization.h
│ │ ├── slrParametrizationConstraints.h
│ │ ├── slrParametrizationDynamicOrbits.cpp
│ │ ├── slrParametrizationDynamicOrbits.h
│ │ ├── slrParametrizationEarthRotation.cpp
│ │ ├── slrParametrizationEarthRotation.h
│ │ ├── slrParametrizationGravityField.cpp
│ │ ├── slrParametrizationGravityField.h
│ │ ├── slrParametrizationGroup.h
│ │ ├── slrParametrizationRangeBiasSatellite.h
│ │ ├── slrParametrizationRangeBiasSatelliteApriori.h
│ │ ├── slrParametrizationRangeBiasStation.h
│ │ ├── slrParametrizationRangeBiasStationApriori.h
│ │ ├── slrParametrizationRangeBiasStationSatellite.h
│ │ ├── slrParametrizationRangeBiasStationSatelliteApriori.h
│ │ ├── slrParametrizationStaticPositions.cpp
│ │ ├── slrParametrizationStaticPositions.h
│ │ ├── slrParametrizationTimeBias.cpp
│ │ ├── slrParametrizationTimeBias.h
│ │ ├── slrParametrizationTimeBiasApriori.h
│ │ ├── slrParametrizationTroposphere.cpp
│ │ └── slrParametrizationTroposphere.h
│ ├── slrPlatform.h
│ ├── slrProcessingStep/
│ │ ├── slrProcessingStep.cpp
│ │ ├── slrProcessingStep.h
│ │ ├── slrProcessingStepEstimate.h
│ │ ├── slrProcessingStepGroup.h
│ │ ├── slrProcessingStepPrintResidualStatistics.h
│ │ ├── slrProcessingStepSelectParametrizations.h
│ │ ├── slrProcessingStepSelectSatellites.h
│ │ ├── slrProcessingStepSelectStations.h
│ │ ├── slrProcessingStepWriteAprioriSolution.h
│ │ ├── slrProcessingStepWriteNormalEquations.h
│ │ ├── slrProcessingStepWriteResiduals.h
│ │ ├── slrProcessingStepWriteResults.h
│ │ ├── slrProcessingStepWriteUsedSatelliteList.h
│ │ └── slrProcessingStepWriteUsedStationList.h
│ ├── slrSatellite.h
│ ├── slrSatelliteGenerator/
│ │ ├── slrSatelliteGenerator.cpp
│ │ ├── slrSatelliteGenerator.h
│ │ ├── slrSatelliteGeneratorSatellites.cpp
│ │ └── slrSatelliteGeneratorSatellites.h
│ ├── slrStation.cpp
│ ├── slrStation.h
│ └── slrStationGenerator/
│ ├── slrStationGenerator.cpp
│ ├── slrStationGenerator.h
│ ├── slrStationGeneratorStations.cpp
│ └── slrStationGeneratorStations.h
├── sourcesCXX.txt
├── sourcesF77.txt
└── sourcesF90.txt
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/CODE_OF_CONDUCT.md
================================================
### Introduction
This Code of Conduct applies to all spaces managed by the Gravity Recovery Object Oriented Programming System (GROOPS) project, including all public and private mailing lists, issue trackers, discussions, wikis, blogs, meetings, workshops and any other channel used by our community.
This Code of Conduct should be honored by everyone who participates in the GROOPS community formally or informally, or claims any affiliation with the project, in any project-related activities and especially when representing the project, in any role.
This code is not exhaustive or complete. It serves to distill our common understanding of a collaborative, shared environment and goals. Please try to follow this code in spirit as much as in letter, to create a friendly and productive environment that enriches the surrounding community.
### Specific Guidelines
We strive to:
1. Be open. We invite anyone to participate in our community. We prefer to use public methods of communication for project-related messages, unless discussing something sensitive. This applies to messages for help or project-related support, too; not only is a public support request much more likely to result in an answer to a question, it also ensures that any inadvertent mistakes in answering are more easily detected and corrected.
2. Be empathetic, welcoming, friendly, and patient. We work together to resolve conflict, and assume good intentions. We may all experience some frustration from time to time, but we do not allow frustration to turn into a personal attack. A community where people feel uncomfortable or threatened is not a productive one. Most project members answer questions or investigate issues in their spare time, so be patient when waiting for a response.
3. Be collaborative. Our work will be used by other people, and in turn we will depend on the work of others. When we make something for the benefit of the project, we are willing to explain to others how it works, so that they can build on the work to make it even better. Any decision we make will affect users and colleagues, and we take those consequences seriously when making decisions.
4. Be inquisitive. Nobody knows everything! Asking questions early avoids many problems later, so we encourage questions, although we may direct them to the appropriate forum. We will try hard to be responsive and helpful.
5. Be careful in the words that we choose. We are careful and respectful in our communication, and we take responsibility for our own speech. Be kind to others. Do not insult or put down other participants. We will not accept harassment or other exclusionary behavior, such as:
* Violent threats or language directed against another person.
* Sexist, racist, or otherwise discriminatory jokes and language.
* Posting sexually explicit or violent material.
* Posting (or threatening to post) other people’s personally identifying information (“doxing”).
* Sharing private content, such as emails sent privately or non-publicly, without the sender’s consent.
* Personal insults, especially those using racist or sexist terms.
* Unwelcome sexual attention.
* Excessive profanity. Please avoid swearwords; people differ greatly in their sensitivity to swearing.
* Repeated harassment of others. In general, if someone asks you to stop, then stop.
* Advocating for, or encouraging, any of the above behavior.
### Diversity Statement
The GROOPS project welcomes and encourages participation by everyone. No matter how you identify yourself or how others perceive you: we welcome you. We are committed to being a community that everyone enjoys being part of. Although we may not always be able to accommodate each individual’s preferences, we try our best to treat everyone kindly.
Though no list can hope to be comprehensive, we explicitly honor diversity in: age, culture, ethnicity, genotype, gender identity or expression, language, national origin, neurotype, phenotype, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, subculture and technical ability, to the extent that these do not conflict with this Code of Conduct.
### Enforcement and Reporting Guidelines
We encourage all community members to resolve issues on their own whenever possible. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the [project committee](https://github.com/orgs/groops-devs/people).
In case of severe and obvious breaches, we will immediately disconnect the originator from the GROOPS communication channels. In cases not involving clear, severe, and obvious breaches of this Code of Conduct, the process for acting on any received Code of Conduct violation report will be:
1. Acknowledgement that the report has been received
2. Reasonable discussion/feedback
3. Mediation (if feedback didn’t help, and only if both reporter and reportee agree to this)
4. Enforcement via transparent decision by the [project committee](https://github.com/orgs/groops-devs/people) leading to one of the following resolutions:
1. Correction
A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.
2. Warning
Consequence: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.
3. Temporary Ban
Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
4. Permanent Ban
Consequence: A permanent ban from any sort of public interaction within the community.
### Attribution
This Code of Conduct is adapted from the [SciPy Code of Conduct](https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html), the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) and the [Contributor Covenant Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.yml
================================================
name: Bug Report
description: File a bug report.
labels: ["bug"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
Please make sure you have checked the [GROOPS documentation](https://groops-devs.github.io/groops/html/) before submitting a bug report.
- type: textarea
id: description
attributes:
label: Description
description: |
- A clear and concise description of what the bug is.
- Also tell us what you expected to happen.
- If possible, append a **minimum working example** (GROOPS config file plus any required data) that leads to the bug.
- Otherwise, include step-by-step instructions on how to reproduce the bug.
placeholder: Tell us what the issue is about.
validations:
required: true
- type: dropdown
id: version
attributes:
label: GROOPS version
description: What version of GROOPS are you running?
options:
- main (latest commit)
- Release 2021-09-06
- Release 2021-02-02
- Release 2020-11-12
- Other (please specify in the description)
validations:
required: true
- type: checkboxes
id: operating-systems
attributes:
label: Operating systems
description: Which operating systems are you using? You may select more than one.
options:
- label: Linux
- label: Windows
- label: macOS
- label: Other (please specify in the description)
- type: textarea
id: logs
attributes:
label: Log output
description: Please copy and paste any relevant log output. Run ```groops -l logfile.txt ``` to create a log file. If the log output is too long, please attach the log file in the description instead.
placeholder: Paste any relevant log output here.
render: dircolors
================================================
FILE: .github/ISSUE_TEMPLATE/config.yml
================================================
blank_issues_enabled: true
contact_links:
- name: Question
url: https://github.com/groops-devs/groops/discussions
about: Ask about GROOPS usage or other topics in the Discussions section.
- name: GROOPS Documentation
url: https://groops-devs.github.io/groops/html/
about: Please check the GROOPS documentation before opening a new issue.
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.yml
================================================
name: Feature Request
description: Suggest a new feature.
labels: ["enhancement"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to suggest a new feature!
Please make sure you have checked the [GROOPS documentation](https://groops-devs.github.io/groops/html/) before submitting a feature request.
- type: textarea
id: description
attributes:
label: Description
description: |
- Is your feature request related to a problem? Please describe.
- Describe the solution you would like.
- Describe alternatives you have considered.
placeholder: Tell us about the feature.
validations:
required: true
================================================
FILE: .gitignore
================================================
# miscellaneous
# -------------
*.*~
*.bak
================================================
FILE: CHANGELOG.md
================================================
# main
- New program: Accelerometer2GraceL1b.
- New program: Orbit2GraceL1b.
- New program: SatelliteTracking2GraceL1b.
- New program: StarCamera2GraceL1b.
- New class: ParameterSelector: group.
- New option: conditionStringContains/MatchPattern: supports wildcards.
- Bugfix: ConditionFileExist: fixed bug with wildcards.
- Bugfix: GUI: fixed MatchCaseSensitive for schema selection.
- Bugfix: GUI: fixed bug when moving elements to same position.
- Bugfix: GNSS, WriteNormalEquations: check non-matching parameter selection.
- Bugfix: Instrument2AllanVariance: Fix sample counting.
- Bugfix: The parser was sometimes applied twice to filenames.
- Bugfix: Tle2Orbit: Fixed bug when first record in file is used.
- Other: NetCDF: enable reading of multidimensional variables and storing them in multiple columns.
- Other: Changed ftp.tugraz.at/outgoing/ITSG to ftp.tugraz.at/pub/ITSG.
- Other: ModelEquilibriumTide: Taking into account spatially varying density.
- Other: GnssTroposphere2TropoSinex: Revised interface.
- Other: GnssAntex2AntennaDefinition: Handling of changing antennas of a SVN.
# Release 2025-11-15
- New program: StarCamera2Orbex (replaces GnssAttitude2Orbex).
- New program: DoodsonHarmonicsChangePartialTides.
- New program: Conversion: OceanTidesDTU2GriddedData.
- New program: GnssResiduals2TransmitterAccuracyDefinition.
- New program: SynthesisSphericalHarmonicsMatrix.
- New program: Gravityfield2GravityVector.
- New program: NormalsReorderAndAccumulate.
- New class: PlotDegreeAmplitudes: degreeAmplitudesSimple.
- New class: Loop: SortAndRemoveDuplicates of an arbitrary loop.
- New option: GnssAntennaNormalsConstraint: gnssType selection for TEC constraint.
- New option: PlotAxisLabeled: majorTickSpacing, minorTickSpacing, gridLineSpacing.
- New option: GNSS/SLR: optional disabling of stations without precise apriori positions.
- New option: condition->fileExist: minimumSize.
- New option: GNSS->parametrizationVTEC: optional gradient estimation.
- New option: GNSS->TransmitterGeneratorGnss: option to disable clock interpolation.
- File format: TideGeneratingPotential includes now degree 3 tides.
- File format: Each file is now readable/writable in JSON format as well.
- File format: Removed deprecated GnssStationInfo (replaced by Platform).
- Bugfix: GUI: fixed Ctrl+Shift+Up/Down for variables.
- Bugfix: slrParametrizationRangeBiasStationSatellite: Fix station index.
- Bugfix: parameterNames: fixed wrong order.
- Bugfix: GraceAod2DoodsonHarmonics: fixed phase error.
- Bugfix: GnssOrbex2StarCamera: reads now free format.
- Bugfix: GnssNormals2Sinex: fixed parser error.
- Bugfix: GnssParametrizationIonosphereSTEC: constant sigmaSTEC>0 was evaluated always to one.
- Bugfix: ParameterVector2GnssAntennaDefinition: Sometimes the parameters were assigned incorrectly.
- Bugfix: GNSS: Fixed bug in preprocessing when only one phase observation is available.
- Bugfix: GNSS: Fixed bug in gnssParametrizationTemporalBias when reading apriori values.
- Other: GUI: offer links for numbers and strings of different types.
- Other: GUI: Open multiple config files with the file selector.
- Other: GUI: Inform about deprecated elements.
- Other: gnss: set margin for polynomial orbit interpolation to 1e-7 seconds.
- Other: gnss: simulation considers more apriori models (e.g. TEC maps).
- Other: IGRF: Updated International Geomagnetic Reference Field (IGRF) to 14th Generation Release
- Other: GNSS: Improved setup of ambiguity parameters. Considers splitted network, splitted observations (e.g. L2LG, L2WG).
- Other: GNSS: More stable estimation of ambiguities. Faster ambiguity resolution (decorrelation step).
- Other: GNSS: range scale due to transmitter frequency offset/clock drift.
- Other: Store more double digits in xml format.
- Other: NormalsEliminate: regularize unused eliminated parameters.
- Other: Sinex2StationPosition: Apply frequencies to all intervals.
- Other: GNSS: apriori ionospheric map is interpolated in solar-geomagnetic frame.
- Other: Tle2Orbit: Use closest record if multiple records for the same satellite are given.
- Other: Loop: revised interface.
- Other: Config: The variables groopsConfigFile and workingDir are set automatically.
# Release 2024-06-24
- Interface change: Variables can now be declared at any place in the config file (not only in the global section).
They have local scope within the hierarchy level.
Loop and conditions attributed to elements no longer need to be declared in the global section.
- SLR Processing: New processing of SLR data with estimation of all relevant geodetic parameters.
- New program: SlrProcessing
- New program: conversion: Cpf2Orbit
- New program: conversion: Crd2NormalPoints
- New program: conversion: Cstg2NormalPoints
- New program: conversion: Merit2FullRate
- New program: conversion: Merit2NormalPoints
- New program: conversion: Orbit2Cpf
- New program: conversion: SinexEccentricties2SlrPlatform
- New program: conversion: SlrComModel2RangeBiasStationSatellite
- New program: conversion: SlrSinexDataHandling2Files
- New class: slrParametrization
- New class: slrProcessingStep
- New class: slrSatelliteGenerator
- New class: slrStationGenerator
- New class: In Troposphere: MendesAndPavlis
- New instrument file format: SATELLITELASERRANGING
- New instrument file format: METEOROLOGICAL
- New program: eclipseFactor2GriddedData.cpp: returns a global grid with eclipse factors.
- New program: orbit2EarhFixedOrbit: Rotate an orbit into a rotating Earth-fixed frame.
- New program: GriddedDataTimeSeries2PotentialCoefficients.
- New program: SinexMetadata2GlonassFrequencyNumber.
- New program: SimulateStarCameraTerrasar.
- New class: In PlatformSelector: Equipment.
- New class: In Gravityfield/Tides/MiscAccelerations: Group.
- New option: GnssAntennaDefinitionCreate: setZero and removeCenterMean.
- New option: Sp3Format2Orbit: Write all satellites (identifier is appended to each file).
- New option: GriddedData2PotentialCoefficients: Handle multiple input data columns at once.
- New option: GriddedDataReduceSampling: tesseroid volume conserving for digital elevation models.
- New option: Loop: optional condition for each loop step.
- New option: gnssParametrizationStaticPositions: no net scale constraint.
- New option: GnssProcessingStepResolveAmbiguities: partial ambiguity resolution with selectTransmitters/Receivers.
- New option: GnssParametrizationCodeBiases: option to define ionosphere-free clock datum.
- New option: GnssParametrizationIonosphereMap: option to read/write VTEC maps.
- New option: PlatformSelector: new selection algorithm with exclude option.
- New option: GoceXmlEggNom1b: write also linear common mode accelerations.
- Removed program: GnssPrn2SvnBlockVariables: This program will no longer work from the next release! See documentation for help.
- Removed program: NetCdf2PotentialCoefficients: Use NetCdf2GriddedDataTimeSeries and GriddedDataTimeSeries2PotentialCoefficients instead.
- Removed program: Metop2Starcamera: Use Champ2AccStar instead.
- Bugfix: GnssParametrizationStaticPositions: ignored inputfileNoNetPositions.
- Bugfix: SphericalHarmonicsFilter->DDK: corrected filter for level 4 onwards (Compatible with ICGEM definition).
- Bugfix: ObservationDeflections: fixed index bug.
- Bugfix: NormalEquationFile: corrected contribution computation.
- Bugfix: NetCdf: in some cases the data has been swapped.
- Bugfix: GnssRinexNavigation2OrbitClock: reimplementation of source code.
- Other: GnssAntennaDefinition2ParameterVector: with area weights.
- Other: GnssAntex2AntennaDefinition/GnssGlonassFrequencyNumberUpdate: write Platform files instead of deprecated GnssStationInfo files.
- Other: EarthRotation::rotaryAxis: if EOP time series is too short, use simplified equations.
- Other: Gravityfield2Deflections: Computation based now on normal gravity.
- Other: SimulateStarCamera: new optional nadir pointing mode.
- Other: SimulateOrbit: Improved temporal numerical precision.
- Other: GnssRinexNavigation2OrbitClock: add SBAS support.
- Other: Updated documentation to explain parameter names.
- Other: GriddedData: reworked area computations.
- Other: IersC04IAU2000EarthOrientationParameter: works also with new file format.
- Other: condition->fileExist: Supports now wildcards: * and ?.
- Other: Champ2Orbit: simplified interface.
# Release 2023-08-25
- New program: FileMove.
- New program: FileTextCreate.
- New program: InstrumentRemoveEpochsThruster.
- New program: InstrumentApplyTimeOffset.
- New program: InstrumentAccelerometerEstimateParameters (replaces InstrumentAccelerometerEstimateBiasScale).
- New program: GraceOrbit2TransplantTimeOffset.
- New program: GraceThrusterResponse2Accelerometer.
- New program: GnssReceiver2RinexObservation.
- New program: Sinex2StationPositions: replaces Sinex2StationPosition and Sinex2StationPostSeismicDeformation.
- New program: ObservationEquations2Files.
- New program: Variational2OrbitAndStarCamera (renamed Variational2Orbit): added possibility to also extract Earth rotation.
- New program: PlatformCreate: replaces gnssStationInfoCreate.
- New program: Tle2Orbit: Orbit from Two Line Elements (TLE).
- New program: GriddedData2NetCdf, GriddedDataTimeSeries2NetCdf (replaces GridRectangular2NetCdf).
- New program: NetCdf2GriddedData, NetCdf2GriddedDataTimeSeries (replaces NetCdf2GridRectangular).
- New program: DoodsonAdmittance2SupplementaryFiles: for publication of ocean tide models.
- New program: Gravityfield2TrendPotentialCoefficients: Estimate trend, annual, ...
- New class: In Loop: DirectoryListing.
- New class: In Loop: FileLines.
- New class: In InstrumentResample/interpolatorTimeSeries: FillGapsLeastSquaresPolynomialFit.
- New class: In MiscAccelerations: AtmosphericDragFromDensityFile.
- New class: In MiscAccelerations: RadiationPressure, replaces SolarRadiationPressure, Albedo
- New class: In Loop: PlatformEquipment (replaces FileGnssStationInfo).
- Bugfix: Gravityfield2Deflections: fixed initialization bug.
- Bugfix: GnssRinexNavigation2OrbitClock: updated to be usable for rinex4 and fixes for rinex3.
- Bugfix: Plot: various fixes for GMT Versions >6.1.
- Bugfix: GriddedData: unwrap longitude before computing area elements.
- Bugfix: InstrumentResample/polynomial: corrected interpolation vs. extrapolation.
- Bugfix: GnssResiduals2Skyplot: consider also the PRN in type.
- Bugfix: GnssProcessingStepDisableTransmitterShadowEpochs.
- Bugfix: Sinex2StationPositions: Correct sign for approx. positions.
- Other: Reworked GRACE(-FO) L1A conversion.
- Other: GnssReceiver: Improved preprocessing (initial clock and cycleslip detection).
- Other: GnssParametrizationStaticPositions: better selection of noNet stations.
- Other: GnssParametrizationLeoDynamicOrbits: handling of multiple arcs.
- Other: GnssRinexNavigation2OrbitClock: works also for glonass.
- Other: PlotMisc: set minimum size of points in legend to 3p.
- Other: Preparation to replace GnssStationInfo file format by the more general Platform file format.
- Other: Text parser: new {text/regex/replace}.
- Other: GUI: added new multi-line comment elements, revised code.
# Release 2022-07-28
- New programs: GriddedData2GriddedDataTimeSeries and GriddedDataTimeSeries2GriddedData.
- New programs: MagneticField2GriddedData and Orbit2MagneticField.
- New class: In MiscAccelerations: FromParametrization
- New option: GnssAntennaDefinitionCreate: rename antennas.
- New option: gnssReceiverGeneratorStationNetwork: inputfileClock.
- New option: gnssReceiverGenerator: print preprocessing infos.
- New option: GroupPrograms: silently and additional outputfileLog.
- New option: MatrixGenerator->NormalsFile: observationCount.
- Bugfix: instrument files: empty files are now compatible to other instrument types.
- Bugfix: gnssProcessingStep: uninitialized normalEquationInfo.
- Bugfix: gnssProcessingStep: wrong counting of observations.
- Bugfix: gnssProcessingStepForEachReceiverSeparately: variableReceiver was not set.
- Bugfix: gnssProcessingStepResolveAmbiguities: for writing empty ambiguity file.
- Bugfix: gnssParametrizationClocksModel: Fixed zero mean constraint.
- Bugfix: gnssParametrizationLeoDynamicOrbits: in parallel execution.
- Bugfix: gnssParametrizationKinematicPositions: in parallel execution.
- Bugfix: gnssTransmitter: noAntennaPatternFound->ignoreObservation not working correctly.
- Bugfix: gnssReceiver: Simulating GLONASS ambiguities now correctly considers frequency channel.
- Bugfix: sp3Format2Orbit: no/invalid orbit positions/velocities are now excluded.
- Bugfix: Conversion of GRACE L1B/L1A data: revised source code.
- Bugfix: loopFileAscii: Fixed uninitialized variable that could lead to the loop ending prematurely.
- Bugfix: GnssAntex2AntennaDefinition: Fixed handling of frequency RMS blocks.
- Other: File GriddedDataTimeSeries: includes now the last epoch; interval [...] instead of [...).
- Other: File TimeSplinesGravityfield: includes now the last epoch; interval [...] instead of [...).
- Other: Removed inputfileGlobal option.
- Other: GnssAttitude2Orbex: can now handle different sampling per satellite.
- Other: GnssRinexNavigation2OrbitClock/RinexObservation2GnssReceiver: Added basic support for RINEX v4.00.
- Other: gnssParametrization*DynamicOrbits: integration starts and ends with first/last valid epoch.
- Other: GnssLowEarthOrbiter: createTracks() before removing outlier epochs leads to less track splits.
- Other: GUI: Added 'Open Documentation' to the context menu.
# Release 2021-09-06
- Interface change: Complete redesign of GnssProcessing to make usage a little bit easier and more flexible.
- Direct use of orbits without integrating variational equations in case of fixed transmitters (e.g., PPP).
- New class to add flexible parametrizations to the normal equation system.
- New class to select transmitters/receivers for each parametrization.
- Unified all transmitter classes into single class and merged all transmitter data and metadata into one folder at https://ftp.tugraz.at/pub/ITSG/groops/data/gnss/.
- Example scenarios with config files at https://ftp.tugraz.at/pub/ITSG/groops/scenario/.
- Updated and expanded documentation and cookbooks to reflect all GNSS-related changes.
- New program: InstrumentAccelerometer2ThermosphericDensity: Estimate neutral density from accelerometer data.
- New class: In Thermosphere: new model nrlmsis2
- New class: In Condition: Matrix to evaluate matrix elements.
- New class: In PlotMapProjection: added Mollweide map projection.
- Bugfix: FileSatelliteModel: removed if-statement for shaded plates, not necessary when applying the algorithm following Sentman 1961
- Other: Expression parser: constants are now defined with brackets, e.g. pi().
- Other: Additional constants in the mathematical parser like speed of light c().
# Release 2021-02-02
- Interface change: GnssProcessing, GnssSimulateReceiver: Removed intervals (use program within LoopPrograms instead).
- Interface change: SimulateStarCameraGnss: Full reimplementation with interface change.
Added support for all known attitude modes used by GPS, GLONASS, Galileo, BeiDou, and QZSS. Now requires GnssAttitudeInfo file.
- Interface change: Renamed program KalmanStaticTemporalNormals to NormalsBuildShortTimeStaticLongTime.
- New program: GnssAttitudeInfoCreate: Creates attitude info file used by SimulateStarCameraGnss.
- New program: PreprocessingDualSst: Analyze GRACE-FO KBR and LRI together.
- New class: In Observation: DualSstVariational to use GRACE-FO KBR and LRI together.
- New class: In ParametrizationGravity: LinearTransformation: Gravity field parametrization based on the linear transformation of another parametrizationGravity.
- New option: LoopPrograms: processCountPerIteration (when running the loop on multiple processes), parallelLog (output to screen/log files from all processes).
- New option: IfPrograms: elsePrograms (executed if condition evaluates to false).
- New option: GroupPrograms: catchErrors (prevents program termination on error and optionally runs additional programs, i.e. try-catch).
- Bugfix: Orbit2Kepler: Fixed angular output values (DEG2RAD -> RAD2DEG).
- Bugfix: GnssClockRinex2InstrumentClock: 9-character identifier field width is now used starting from v3.04, not (incorrectly) from v3.00.
- Bugfix: SphericalHarmonicsFilterMatrix: Input coefficient vector is now sorted correctly into filter matrix numbering.
- Bugfix: MatrixDistributed: choleskyInverse(): Fixed a bug with sparse matrices.
- Bugfix: Rectangular grids with one row or column (i.e. parallels or meridians) are now handled correctly.
- Bugfix: InstrumentEstimateEmpiricalCovariance: Computation of autocovariance now works as expected.
- Bugfix: Parallel: Multiple bugfixes and improvements for better support of different MPI implementations.
- Other: Gnss: Updated BeiDou signal definition according to RINEX 3.05 and added support for BeiDou composite types.
- Other: Sp3Format2Orbit: Added support for SP3d format.
- Other: LoopPrograms: continueAfterError now works in parallel execution.
- Other: Improved CMake installation process (see updated INSTALL.md). Now supports parallel compilation and install target.
# Release 2020-11-12
- Initial release
================================================
FILE: CITATION.bib
================================================
@article{Mayer-Gurr2021,
author = {Mayer-G{\"{u}}rr, Torsten and Behzadpour, Saniya and Eicker, Annette and Ellmer, Matthias and Koch, Beate and Krauss, Sandro and Pock, Christian and Rieser, Daniel and Strasser, Sebastian and S{\"{u}}sser-Rechberger, Barbara and Zehentner, Norbert and Kvas, Andreas},
doi = {https://doi.org/10.1016/j.cageo.2021.104864},
issn = {0098-3004},
journal = {Computers & Geosciences},
keywords = {GNSS processing,Gravity field recovery,Orbit determination},
pages = {104864},
title = {{GROOPS: A software toolkit for gravity field recovery and GNSS processing}},
url = {https://www.sciencedirect.com/science/article/pii/S009830042100159X},
year = {2021}
}
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing to GROOPS
We appreciate all contributions such as improving the documentation, reporting or fixing bugs,
implementing new features and answering questions from users.
## Getting Started
### Improving Documentation and Examples
Please inform us about your intention to work on the documentation by
[creating an issue](https://github.com/groops-devs/groops/issues).
When your contribution is finished, please submit a Pull Request (PR)
to https://github.com/groops-devs/groops.
Your contribution will then be reviewed.
### Reporting and Fixing Bugs
If you encounter a bug, please let us know by [filing an issue](https://github.com/groops-devs/groops/issues).
Please include as much information as possible on how to reproduce the bug
and about your software environment (operating system, compiler version, GROOPS version).
If you want to provide a bug fix, please get in contact with us before you start coding.
The best way to do so is by posting your intent in the issue related to that bug.
When your contribution is finished, please submit a Pull Request (PR)
to https://github.com/groops-devs/groops.
Your contribution will then be reviewed.
### Implementing New Features
[Create an issue](https://github.com/groops-devs/groops/issues) and we will discuss together how to
best integrate the new functionality into the existing code base.
Once we together agree on a way forward, you can go ahead and implement it.
When your contribution is implemented and tested, please submit a Pull Request (PR)
to https://github.com/groops-devs/groops.
Your contribution will then be reviewed.
Reviewing large PRs is difficult and will take time, so please be patient.
### Answering User Questions
You can answer questions from users in the
[Discussions](https://github.com/groops-devs/groops/discussions) section.
================================================
FILE: INSTALL.md
================================================
# Installation
This guide provides step-by-step instructions for compiling and running GROOPS from scratch.
- [Get the GROOPS Source Code](#get-the-groops-source-code)
- [Microsoft Windows](#microsoft-windows)
- [Linux](#linux)
- [Ubuntu](#ubuntu)
- [OpenSUSE](#opensuse)
- [Arch Linux](#arch-linux)
## Overview
While GROOPS is intended to be a standalone software package, some functionality depends on external libraries.
The installation instructions provided further below include steps to install these dependencies.
Hard dependencies are:
- [the Expat XML parser](https://libexpat.github.io)
- an implementation of the Linear Algebra Package (LAPACK), for example:
- [OpenBLAS](https://github.com/xianyi/OpenBLAS)
- [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html)
Additional libraries extend the feature set of GROOPS and can be optionally enabled at compile time.
At the moment, these include:
- [NetCDF](https://www.unidata.ucar.edu/software/netcdf) for reading and writing NetCDF files
- [zlib](https://zlib.net) for reading and writing compressed files
- the Essential Routines for Fundamental Astronomy ([liberfa](https://github.com/liberfa/erfa)) for high-precision
Earth rotation
Another optional dependency is an implementation of the Message Passing Interface standard (MPI,
for example [MPICH](https://www.mpich.org/) or [Microsoft MPI](https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi)).
Resource intensive tasks and algorithms are designed and implemented to be optionally run in
parallel on distributed systems.
To visualize data sets, GROOPS requires an installation of the [Generic Mapping Tools (GMT)](https://www.generic-mapping-tools.org/).
## Get the GROOPS Source Code
You can download the source code of a specific version on the
[Releases](https://github.com/groops-devs/groops/releases) page, or
clone the repository to always get the latest updates:
```
git clone https://github.com/groops-devs/groops.git
```
## Microsoft Windows
GROOPS under Windows requires CMake, and 64-bit C++14 and Fortran compilers.
A convenient way to install all required tools is through [MSYS2](https://www.msys2.org).
This installation guide assumes that the GROOPS source code is located in `C:\groops`.
1. Download the MSYS2 installer and follow the [installation guide](https://www.msys2.org/#installation).
2. **Important**: After successful installation, close the MSYS2 terminal from step 1 and open the **MSYS2 MinGW 64-bit terminal**
through `Start Menu > MSYS2 64-bit > MSYS2 MinGW 64-bit`.
The command prompt in the terminal window should now read `username@hostname MINGW64`.
3. Install compilers, cmake, expat, OpenBLAS, and LAPACK:
```
pacman -S mingw-w64-x86_64-toolchain mingw-w64-x86_64-gcc-fortran mingw-w64-x86_64-cmake expat mingw64/mingw-w64-x86_64-openblas
```
4. *(Optional)* Install the NetCDF library:
```
pacman -S mingw-w64-x86_64-netcdf
```
5. *(Optional)* Download and install liberfa:
5.1. Install the `tar` utility and required build tool:
```
pacman -S tar make
```
5.2. Download and build the ERFA library:
```
mkdir -p /c/groops/lib && cd /c/groops/lib
wget https://github.com/liberfa/erfa/releases/download/v1.7.0/erfa-1.7.0.tar.gz
tar -xvf erfa-1.7.0.tar.gz
cd erfa-1.7.0
./configure
make
make install
```
6. *(Optional)* Install Microsoft MPI:
6.1 Download and install the [Microsoft MPI](https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi).
6.2 Install the MSYS2 `msmpi` package:
```
pacman -S mingw-w64-x86_64-msmpi
```
7. Create the build directory and compile GROOPS:
```
mkdir /c/groops/source/build && cd /c/groops/source/build
cmake.exe .. -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="C:\groops"
mingw32-make.exe -j4
mingw32-make.exe install
```
8. Make sure to also read the [post-installation steps](#windows-post-installation-steps).
### Graphical User Interface (GUI)
The GROOPS GUI is based on [Qt](https://www.qt.io/) and is tested with Qt version 5.15.1.
We recommend installing the GROOPS GUI via the precompiled Windows binaries provided with
each [release](https://github.com/groops-devs/groops/releases).
In case you want to compile the GUI yourself, you need to:
1. [Download and install](https://www.qt.io/download-qt-installer) Qt (registration required).
2. When prompted to choose which Qt components to install, select `Select Package Categories > LTS` and
then select `Qt > Qt 5.15.1` or a newer version. Under `Developer and Designer Tools`, `Qt Creator Debugger Support`,
`Debugging Tools for Windows`, `cmake`, and `Ninja` should be selected automatically.
3. Open the project file `C:\groops\gui\groopsGui.pro` in Qt Creator and build the project.
### Generic Mapping Tools (GMT)
The Generic Mapping Tools (GMT) are an optional dependency of GROOPS and enable the generation of high-quality
figures.
GMT provides [Windows binaries](https://github.com/GenericMappingTools/gmt/releases) which can be easily installed.
The current GROOPS release is tested against GMT version 6.4.0.
### Windows post-installation steps
After the installation of GROOPS and GROOPS GUI, we recommend some post-installation configuration steps to make
working with GROOPS easier.
1. To use the GROOPS and GROOPS GUI binaries without directory prefix, you have to add the required
directories to the system path.
1.1. Open the Control Panel through the Windows Start Menu: `Windows System > Control Panel`.
1.2. In the Control Panel window, go to `User Accounts > User Accounts`.
1.3. There you should click on `Change my environment variables`, which will open a new window.
1.4. In the environment variable window, select `Path` and click `Edit...`. A pop-up window will appear
where you can add the following directories to your system path:
```
"C:\msys64\mingw64\bin"
"C:\groops\bin"
"C:\Program Files\Microsoft MPI\Bin"
```
In case you manually compiled the GUI, additionally add the directory:
```
"C:\Qt\5.15.1\mingw81_64\bin"
```
The `mingw64` path should be in the first line of the list.
2. *(Optional)* Set the environment variable `OPENBLAS_NUM_THREADS` or `OMP_NUM_THREADS` to the number of threads to use for matrix operations.
:warning: When running GROOPS in parallel, threaded BLAS/LAPACK libraries may conflict with MPI processes and cause a deterioration of performance. In that case, the number of threads for matrix operations should be set to 1.
GROOPS depends on data files such as Earth rotation, Love numbers, and wavelet coefficients.
An initial data set that is regularly updated is available on [our FTP server](https://ftp.tugraz.at/pub/ITSG/groops/).
You can choose between downloading the data directory or a single [zip file](https://ftp.tugraz.at/pub/ITSG/groops/data.zip) with the same content.
## Linux
Most Linux distributions provide all GROOPS dependencies through their package managers.
We provide a detailed installation guide for Ubuntu and OpenSUSE, the installation steps
are however very similar for other distributions.
### Ubuntu
The installation procedure for Ubuntu is representative for all Debian based distributions,
however the individual package names may differ.
Check your distribution's documentation to find the correct packages.
This installation guide assumes that the GROOPS source code is located in `$HOME/groops`.
1. First, make sure your system is up to date:
```
sudo apt update && sudo apt upgrade
```
2. Install dependencies and build tools:
```
sudo apt-get install g++ gfortran cmake libexpat1-dev libopenblas-dev
```
3. *(Optional)* Install the NetCDF development package:
```
sudo apt-get install libnetcdf-dev
```
4. *(Optional)* Install liberfa development packages:
```
sudo apt-get install liberfa-dev
```
5. *(Optional)* Install MPI development packages:
```
sudo apt-get install mpi-default-dev
```
6. Create the build directory and compile GROOPS:
```
mkdir $HOME/groops/source/build && cd $HOME/groops/source/build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME/groops
make -j4
make install
```
7. Make sure to also read the [post-installation steps](#linux-post-installation-steps).
#### Graphical User Interface (GUI)
The GROOPS GUI depends on Qt packages.
To install the required packages, run:
```
sudo apt-get install qtbase5-dev
```
Then, change into the `gui` directory and compile the source code:
```
cd $HOME/groops/gui
qmake
make
```
#### Generic Mapping Tools (GMT)
Ubuntu provides packages for the Generic Mapping Tools:
```
sudo apt-get install gmt gmt-gshhg
```
### OpenSUSE
1. First, make sure your system is up to date:
```
sudo zypper up
```
2. Install dependencies and build tools:
```
sudo zypper install gcc-c++ gcc-fortran cmake libexpat-devel openblas-devel
```
3. *(Optional)* Install the NetCDF development package:
```
sudo zypper install netcdf-devel
```
4. *(Optional)* Install liberfa development packages:
```
sudo zypper install erfa-devel
```
5. *(Optional)* Install MPI development packages:
```
sudo zypper install openmpi4 openmpi4-devel
```
6. Create the build directory and compile GROOPS:
```
mkdir $HOME/groops/source/build && cd $HOME/groops/source/build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME/groops
make -j4
make install
```
7. Make sure to also read the [post-installation steps](#linux-post-installation-steps).
#### Graphical User Interface (GUI)
The GROOPS GUI depends on Qt packages.
To install the required packages, run:
```
sudo zypper install libqt5-qtbase-devel
```
Then, change into the `gui` directory and compile the source code:
```
cd $HOME/groops/gui
qmake-qt5
make
```
#### Generic Mapping Tools (GMT)
The OpenSUSE packages for the Generic Mapping Tools are available in the `GEO` repository
(change OpenSUSE release version if necessary):
```
sudo zypper addrepo http://download.opensuse.org/repositories/Application:/Geo/15.4/ GEO
```
Then install the packages:
```
sudo zypper install GMT GMT-doc GMT-coastlines
```
### Arch Linux
GROOPS is packaged for the [Arch User Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository).
You can install the [groops-git](https://aur.archlinux.org/packages/groops-git/) package providing the core GROOPS executables,
and the [groopsgui-git](https://aur.archlinux.org/packages/groopsgui-git/) package providing the GUI and documentation.
The easiest way to do this is through an [AUR helper](https://wiki.archlinux.org/index.php/AUR_helpers). If you are using `yay`,
for example, you can install GROOPS and the GUI by executing:
```
yay -S groops-git groopsgui-git
```
If you want to develop for GROOPS, a manual installation is preferable.
This installation guide assumes that the GROOPS source code is located in `$HOME/groops`.
1. First, make sure your system is up to date:
```
sudo pacman -Syu
```
2. Install dependencies and build tools:
```
sudo pacman -S cmake gcc gcc-gfortran expat lapack zlib
```
3. *(Optional)* Install the NetCDF development package:
```
sudo pacman -S netcdf-cxx
```
4. *(Optional)* Install liberfa development packages. liberfa is available as an [AUR package](https://aur.archlinux.org/packages/erfa/).
5. *(Optional)* Install an MPI development package, for example `openmpi`:
```
sudo pacman -S openmpi
```
6. Create the build directory and compile GROOPS:
```
mkdir $HOME/groops/source/build && cd $HOME/groops/source/build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME/groops
make -j4
make install
```
7. Make sure to also read the [post-installation steps](#linux-post-installation-steps).
#### Graphical User Interface (GUI)
The GROOPS GUI depends on Qt packages.
To install the required packages, run:
```
sudo pacman -S qt5-base
```
Then, change into the `gui` directory and compile the source code:
```
cd $HOME/groops/gui
qmake
make
```
#### Generic Mapping Tools (GMT)
The Generic Mapping Tools are available from the [Arch User Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository).
Install the [gmt](https://archlinux.org/packages/extra/x86_64/gmt/) and [gmt-coast](https://aur.archlinux.org/packages/gmt-coast) packages.
### Linux post-installation steps
After the installation of GROOPS and GROOPS GUI, we recommend some post-installation configuration steps to make
working with GROOPS easier.
1. To use the GROOPS and GROOPS GUI binaries without a directory prefix, you have to add the required
directories to the system path:
```
echo 'export PATH=$PATH:$HOME/groops/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
2. *(Optional)* Set the environment variable `OPENBLAS_NUM_THREADS` or `OMP_NUM_THREADS` to the number of threads to use for matrix operations.
:warning: When running GROOPS in parallel, threaded BLAS/LAPACK libraries may conflict with MPI processes and degrade performance. In that case, set the number of threads for matrix operations to 1.
GROOPS depends on data files such as Earth rotation parameters, Love numbers, and wavelet coefficients.
An initial data set that is regularly updated is available on [our FTP server](https://ftp.tugraz.at/pub/ITSG/groops/).
You can choose between downloading the data directory or a single [zip file](https://ftp.tugraz.at/pub/ITSG/groops/data.zip) with the same content.
## Disabling external source files
While GROOPS is intended to be a standalone software package, some functionality depends on external software.
Information about external source code contained in the repository which is licensed differently can be found in the
[corresponding README](https://github.com/groops-devs/groops/blob/main/source/external/README.md).
To compile GROOPS without these external source files, variables can be passed to CMake in the configuration step,
for example:
```
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME/groops -DDISABLE_IERS=TRUE
```
Available variables are `DISABLE_HWM14`, `DISABLE_NRLMSIS`, `DISABLE_JB2008`,
`DISABLE_IGRF`, `DISABLE_IERS`, `DISABLE_ERFA`, `DISABLE_Z`, and `DISABLE_NETCDF`.
Setting these to `TRUE` will skip compilation of the respective source files.
================================================
FILE: LICENSE
================================================
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
Copyright (C)
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
================================================
FILE: README.md
================================================

> [!IMPORTANT]
> The link for retrieving data has changed to [ftp.tugraz.at/pub/ITSG/groops](https://ftp.tugraz.at/pub/ITSG/groops).
The Gravity Recovery Object Oriented Programming System (GROOPS) is a software toolkit written in C++
that enables the user to perform core geodetic tasks.
Key features of the software include gravity field recovery from satellite and terrestrial data,
the determination of satellite orbits from global navigation satellite system (GNSS) measurements,
and the processing of GNSS constellations and ground station networks.
Most tasks and algorithms are (optionally) parallelized through the Message Passing Interface (MPI), thus
the software enables a smooth transition from single-CPU desktop computers to large distributed
computing environments for resource intensive tasks.
For an easy and intuitive setup of complex workflows, GROOPS contains a graphical
user interface where configuration files can be created and edited.
- [Citing GROOPS](#citing-groops)
- [Installation](#installation)
- [Getting Started](#getting-started)
- [Contributing](#contributing)
- [License](#license)
- [Contributors](#contributors)
## Citing GROOPS
If you use data sets computed with GROOPS in a publication or publish the data itself,
please cite our [reference paper](https://doi.org/10.1016/j.cageo.2021.104864):
*Mayer-Guerr, T., Behzadpour, S., Eicker, A., Ellmer, M., Koch, B., Krauss, S., Pock, C., Rieser, D., Strasser, S., Suesser-Rechberger, B., Zehentner, N., Kvas, A. (2021). GROOPS: A software toolkit for gravity field recovery and GNSS processing. Computers & Geosciences, 104864. https://doi.org/10.1016/j.cageo.2021.104864*
```
@article{Mayer-Gurr2021,
author = {Mayer-Guerr, Torsten and Behzadpour, Saniya and Eicker, Annette and Ellmer, Matthias and Koch, Beate and Krauss, Sandro and Pock, Christian and Rieser, Daniel and Strasser, Sebastian and Suesser-Rechberger, Barbara and Zehentner, Norbert and Kvas, Andreas},
doi = {https://doi.org/10.1016/j.cageo.2021.104864},
issn = {0098-3004},
journal = {Computers & Geosciences},
keywords = {GNSS processing,Gravity field recovery,Orbit determination},
pages = {104864},
title = {{GROOPS: A software toolkit for gravity field recovery and GNSS processing}},
url = {https://www.sciencedirect.com/science/article/pii/S009830042100159X},
year = {2021}
}
```
## Installation
GROOPS is written in C++ and contains some legacy Fortran code.
To enable an intuitive interaction with the software, GROOPS includes a
graphical user interface (GUI).
The GUI is also written in C++ and depends on the Qt toolkit.
A detailed installation guide for Microsoft Windows and various Linux distributions can be found
on the [Installation page](https://github.com/groops-devs/groops/blob/main/INSTALL.md).
## Getting Started
After a successful installation our [Documentation](https://groops-devs.github.io/groops/html/index.html)
is the perfect way to get familiar with the different features of GROOPS.
GROOPS depends on data files such as Earth rotation, Love numbers, and wavelet coefficients.
An initial data set that is regularly updated is available on [our FTP server](https://ftp.tugraz.at/pub/ITSG/groops/).
You can choose between downloading the data directory or
a single [zip file](https://ftp.tugraz.at/pub/ITSG/groops/data.zip) with the same content.
## Contributing
We appreciate all contributions such as improving the documentation, reporting or fixing bugs,
implementing new features. Answering user questions in the
[Discussions](https://github.com/groops-devs/groops/discussions) section is another great way
of contributing to the GROOPS community.
If you encounter a bug, please let us know by [filing an issue](https://github.com/groops-devs/groops/issues).
Please include as much information as possible on how to reproduce the bug
and about your software environment (operating system, compiler version, GROOPS version).
If you want to provide a bug fix or implement a new feature,
please get in contact with us in the [Discussions](https://github.com/groops-devs/groops/discussions)
before you start coding.
## License
GROOPS is licensed under GPLv3, as found in the [LICENSE](https://github.com/groops-devs/groops/blob/main/LICENSE) file.
This license applies to all files in the repository unless otherwise indicated.
Information about external source code contained in the repository which is licensed differently can be found in the
[corresponding README](https://github.com/groops-devs/groops/blob/main/source/external/README.md).
## Contributors
Parts of GROOPS originate from developments in the Astronomical, Physical and Mathematical Geodesy Group
at the University of Bonn, Germany.
Since 2010 it has been developed and maintained at Graz University of Technology, Austria.
Here is a list of current and past contributors:
[Torsten Mayer-Guerr](https://github.com/tmayerguerr), Annette Eicker, Daniel Rieser, Norbert Zehentner,
Christian Pock, [Matthias Ellmer](https://github.com/x49), Beate Koch, [Andreas Kvas](https://github.com/akvas), Saniya Behzadpour,
[Sebastian Strasser](https://github.com/sestras), Sandro Krauss, Barbara Suesser-Rechberger,
[Patrick Dumitraschkewitz](https://github.com/zhedumi), Felix Oehlinger, [Andreas Strasser](https://github.com/ajs8041)
================================================
FILE: bin/.gitignore
================================================
*
!.gitignore
================================================
FILE: docs/documentation.html
================================================
link to documentation
================================================
FILE: docs/documentationSource.html
================================================
link to documentation
================================================
FILE: docs/html/Accelerometer2GraceL1b.html
================================================
GROOPS - Accelerometer2GraceL1b
The text file inputfileHeader is placed at the beginning of the outputfile.
The text parser is applied so that all variables can be used.
In addition, the times of the data are available with the variables {epochmin}, {epochmax},
and {epochcount}.
This program computes the covariance structure of a random process represented by an AR model sequence.
The covariance matrix is determined by accumulating the normal equations of all AR models in autoregressiveModelSequence
and inverting the combined normal equation matrix.
For each output file in outputfileCovarianceMatrix,
the covariance matrix of appropriate time lag is saved (the first file contains the auto-covariance,
second file cross covariance and so on). The matrix for lag $h$ describes the covariance between $x_{t-h}$ and $x_{t}$, i.e. $\Sigma(t-h, t)$.
This program reads in CHAMP accelerometer and star camera data given in the special CHAMP format.
In the case of CHAMP, accelerometer and star camera data are both stored in one file.
A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-001.pdf.
This program reads in cosmic orbit and star camera data given in the CHAMP format.
In the case of COSMIC, orbit and star camera data are stored in one file.
A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-001.pdf
Computes digital filter coefficients for a digital filter of given degree and
order. The filter coefficients are computed by fitting them to an approximated
impulse response represented by the cholesky factor of the covariance matrix.
The parameter warmup determines from which element of the cholesky matrix the
coefficients (default: half the covariance length) are fitted.
Per default, the program computes filter coefficients which generate colored noise
when applied to a white noise sequence. When decorrelationFilter is set,
a decorrelation filter is computed which yields white noise when applied to colored noise.
Name
Type
Annotation
outputfileFilter
filename
filter coefficients
inputfileCovariance
filename
first column: time steps, following columns: covariance functions
column
uint
Column with covariance function to be fitted
warmup
uint
number of samples until diagonal of Cholesky factor is flat (default: half covariance length)
numeratorDegree
uint
Maximum degree of numerator polynomial (MA constituent)
denominatorDegree
uint
Maximum degree of denominator polynomial (AR constituent)
One sided Power Spectral Density (PSD) from a covariance function. The first column of inputfileCovarianceFunction
should contain the time lag in seconds.
Multiple covariance functions (in the following columns) are supported.
The output is a matrix with first column contains the frequency $[Hz]$ and the other columns the PSD $[unit^2/Hz]$.
Conversion between covariance function $c_j$ and PSD $p_k$ is performed by discrete cosine transformation:
\[
p_k = 2\Delta t\left(c_0 + c_{n-1} (-1)^k + \sum_{j=1}^{n-2} 2 c_j \cos(\pi jk/(n-1))\right).
\]
See also PowerSpectralDensity2CovarianceFunction.
Name
Type
Annotation
outputfilePSD
filename
first column: frequency [Hz], other columns PSD [unit^2/Hz]
inputfileCovarianceFunction
filename
first column: time steps, following columns: covariance functions
This program computes a VAR(p) model from empirical covariance matrices.
The inputfileCovarianceMatrix represent the covariance structure of the process:
the first file should contain the auto-covariance, the second the cross-covariance of lag one,
the next cross-covariance of lag two and so on.
Cross-covariance matrices $\Sigma_{\Delta_k}$ are defined as the cross-covariance between epoch $t-k$ and $t$.
If the process realizations $x_{t}$ are arranged by ascending time stamps
($\{\dots, x_{t-2}, x_{t-1}, x_{t}, x_{t+1}, x_{t+2},\dots\}$),
the covariance structure of the (stationary) process is therefore given by
\[
\begin{bmatrix}
\Sigma & \Sigma_{\Delta_1} & \Sigma_{\Delta_2} & \cdots \\
\Sigma_{\Delta_1}^T & \Sigma & \Sigma_{\Delta_1} & \cdots \\
\Sigma_{\Delta_2}^T & \Sigma_{\Delta_1}^T & \Sigma & \cdots \\
\vdots & \vdots & \vdots & \ddots \\
\end{bmatrix}.
\]
The estimated AR model is saved as a single matrix outputfileAutoregressiveModel according to the GROOPS AR model conventions.
Name
Type
Annotation
outputfileAutoregressiveModel
filename
coefficients and white noise covariance of AR(p) model
This program computes the Pearson correlation coefficient
\[
\rho_{ij} = \frac{\sigma_{ij}}{\sigma_i \sigma_j}
\]from a given covariance matrix stored in inputfileCovarianceMatrix.
The result is stored in outputfileCorrelationMatrix.
The time format of the CPF file is UTC.
The coordinate system used in the CPF format is usually represented in TRF.
If earthRotation is provided the data are transformed
from terrestrial (TRF) to celestial reference frame (CRF).
Compute amplitude-, phase-, group delay and frequency response of a digitalFilter cascade.
The outputfileResponse is a matrix with following columns:
freq $[Hz]$, ampl, phase $[rad]$, group delay $[-]$, real, imag.
When unwrapPhase is set to true, $2\pi$ jumps of the phase response are removed before writing the output to file.
The response of the filter cascade is given by the product of each individual frequency response:
\[
H(f) = \prod_j H_j(f).
\]Amplitude and phase response are computed from the frequency response via
\[
A(f) = |H(f)| \hspace{5pt}\text{and}\hspace{5pt} \Phi(f) = \arctan \frac{\mathcal{I}(H(f))}{\mathcal{R}(H(f))}.
\]The group delay is computed by numerically differentiating the phase response
\[
\tau_g(f_k) = \frac{1}{2} \left[\frac{\Phi(f_k) - \Phi(f_{k-1})}{2\pi(f_k-f_{k-1})} + \frac{\Phi(f_{k+1}) - \Phi(f_{k})}{2\pi(f_{k+1}-f_{k})}\right] \approx \frac{d\Phi}{df}\frac{df}{d\omega}.
\]The frequency vector for a length $N$ and a sampling $\Delta t$ is given by
\[
f_k = \frac{k}{N \Delta t}, \hspace{15pt} k \in \{0, \dots, \left\lfloor\frac{N+2}{2}\right\rfloor-1\}.
\]
See also DigitalFilter2ImpulseResponse.
Impulse response of a digitalFilter cascade.
The impulse response is computed by filtering a sequence with length samples and a unit impulse at index pulseLag.
The outputfileResponse is a matrix with the time stamp (zero at pulseLag)
in the first column and the impulse response $h_k$ in the second column.
The publication of an ocean tide model includes not only the atlas
in the form of spherical harmonics coefficients,
but also the matrix of Doodson multipliers (outputfileDoodsonMatrix)
and the outputfileAdmittanceMatrix.
The outputfileMajorTideList contains the fileNames
for each constituent.
The required information is taken from the
inputfileAdmittance.
To visualize the interpolation of the minor tides.
The output is a matrix with the first column containing the tidal frequency,
the second column is the tide generating amplitude (from inputfileTideGeneratingPotential), and the following
columns the contribution of the major tides to this tidal frequency as defined in inputfileAdmittance.
Figure: Linear interpolation of minor tides in the diurnal band.
To visualize the interpolation of the minor tides it computes cosine multipliers of all major tides.
Without admittance this would be a simple cos oscillation.
The outputfileTimeSeries is an instrument file (MISCVALUES) containing the cos of all the major tides.
Figure: Cosine of the Mf tidal frequency with modulation from the interpolated minor tides.
Time series of doodson/fundamental arguments.
The outputfileTimeSeries contains the six Doodson arguments,
followed by the five fundamental arguments in radians.
Name
Type
Annotation
outputfileTimeSeries
filename
each epoch: 6 doodson args, 5 fundamental args [rad]
This program reads a inputfileDoodsonHarmonics and evaluates a single tidal
constituent selected by doodson (Doodson number or Darwin's name, e.g. 255.555 or M2).
This program computes the amplitude and phase from the cos and sin coefficients on
a given grid. The type of functional (e.g gravity anomalies or geoid heights)
can be chosen with kernel.
The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height)
based on a reference ellipsoid with parameters R and inverseFlattening.
To visualize the results use PlotMap.
The inputfileDoodsonHarmonics contains a Fourier series of a time variable
gravitational potential at specific tidal frequencies (tides)
\[
V(\M x,t) = \sum_{f} V_f^c(\M x)\cos(\theta_f(t)) + V_f^s(\M x)\sin(\theta_f(t)),
\]where $V_f^c(\M x)$ and $V_f^s(\M x)$ are spherical harmonics expansions.
If set the expansions are limited in the range between minDegree
and maxDegree inclusively. The coefficients are related to the reference radius R
and the Earth gravitational constant GM.
The outputfilePotentialCoefficients is not a single file but a series of files.
For each spherical harmonics expansion $V_f^c(\M x)$ and $V_f^s(\M x)$ a separate file is created
where the variables variableLoopName, variableLoopDoodson, variableLoopCosSin are set accordingly.
The file name should contain these variables, e.g. coeff.{name}.{doodson}.{cossin}.gfc.
If applyXi the Doodson-Warburg phase correction (see IERS conventions) is applied to the cos/sin
potentialCoefficients before.
Name
Type
Annotation
outputfilePotentialCoefficients
filename
variableLoopName
string
variable with Darwin's name of each constituent
variableLoopDoodson
string
variable with doodson code of each constituent
variableLoopCosSin
string
variable with 'cos' or 'sin' of each constituent
variableLoopIndex
string
variable with index of each constituent (starts with zero)
variableLoopCount
string
variable with total number of constituents
inputfileDoodsonHarmonics
filename
inputfileTideGeneratingPotential
filename
to compute Xi phase correction
minDegree
uint
maxDegree
uint
GM
double
Geocentric gravitational constant
R
double
reference radius
applyXi
boolean
apply Doodson-Warburg phase correction (see IERS conventions)
Computes a timeSeries (GPS time) of Earth Orientation Parameter (EOP).
The instrument file (MISCVALUES) contains the elements at each epoch in the following order:
$x_p$ [rad]
$y_p$ [rad]
$s_p$ [rad]
$UT1-UTC$ [seconds]
length of day (LOD) [seconds]
$X$ [rad]
$Y$ [rad]
$S$ [rad]
The values are in situ values with all corrections and models applied. The time series can be used to
precompute Earth rotation with a low temporal resolution (e.g. 10 min) and reuse the file in
earthRotation:file to interpolate the data to the needed epochs
(e.g. to rotate orbit data). As some Earth rotation models are quite slow this can accelerate the computation.
Name
Type
Annotation
outputfileEOP
filename
each row: mjd(GPS), xp, yp, sp, dUT1, LOD, X, Y, S
Computes a outputfileTimeSeries of Earth's rotary axis
and its temporal derivative at timeSeries (GPS time).
The instrument file (MISCVALUES) contains the elements at each epoch in the following order:
This program converts the output of a eclipse model on a given
grid. The time for the evaluation can be specified in time.
The values will be saved together with points expressed as ellipsoidal coordinates
(longitude, latitude, height) based on a reference ellipsoid with parameters R
and inverseFlattening.
This program estimates satellite-to-satellite-tracking (SST) deterministic signals due to eclipse transits from residuals.
The ensemble averaging method is used to characterize the average properties of signal shapes across all transit events.
Each shape is assigned to one arc of 3 hours (default). This can be modified by enabling averagingInterval.
Name
Type
Annotation
outputfileScaleModel
filename
inputfileGrace1EclipseFactor
filename
GRACE-A eclipse factors computed with integrated orbit
inputfileGrace2EclipseFactor
filename
GRACE-B eclipse factors computed with integrated orbit
Converts GROOPS file between different file formats (ASCII, XML, JSON, binary),
see file formats for details.
With an additional extension of '.gz' files are directly compressed and uncompressed.
It prints also some information about the content.
Therefore it can be used to get an idea about the content of binary files.
Name
Type
Annotation
outputfile
filename
GROOPS formats: .xml, .txt, .json, .dat (optional with additional .gz)
inputfile
filename
GROOPS formats: .xml, .txt, .json, .dat (optional with additional .gz)
Create a spherical harmonic window matrix. The window matrix $\mathbf{W}$ is generated in space domain through
spherical harmonic synthesis and analysis matrices.
The resulting linear operator can be written as
\[
\mathbf{W} = \mathbf{K} \mathbf{A} \mathbf{\Omega} \mathbf{S} \mathbf{K}^{-1}.
\]Here, $\mathbf{K}$ is a diagonal matrix with the kernel coefficients on the main diagonal,
$\mathbf{S}$ is the spherical harmonic synthesis matrix, $\mathbf{\Omega}$ is defined by the values in
inputfileGriddedData and the
expression value, $\mathbf{A}$ is the spherical harmonic analysis matrix.
The resulting window matrix is written to a matrix file.
The spherical harmonic degree range, and coefficient numbering are defined by
minDegree, maxDegree, and numbering.
Note that a proper window function $\mathbf{\Omega}$ should contain values in the range [0, 1].
The window function $\mathbf{\Omega}$ can feature a smooth transition between 0 and 1 to avoid ringing effects.
Name
Type
Annotation
outputfileWindowMatrix
filename
inputfileGriddedData
filename
gridded data which defines the window function in space domain
value
expression
expression to compute the window function (input columns are named data0, data1, ...)
This program manipulates matrix files with data in columns.
If several inputfiles are given the data columns are copied side by side.
All inputfiles must contain the same number of rows.
The columns are enumerated by data0, data1, … .
The content of outputfile is controlled by outColumn.
The algorithm to compute the output is as follows:
The expressions in outColumn are evaluated once for each row of the input.
The variables data0, data1, … are replaced by the according values from the input columns before.
Additional variables are available, e.g. index, data0rms, see dataVariables.
If no outColumn is specified, all input columns are used directly instead.
For a simplified handling constants can be defined by name=value, e.g. annual=365.25.
It is also possible to estimate parameters in a least squares adjustment.
The leastSquares serves as template for observation equations for every row.
The expression leastSquares is evaluated for each row in the inputfile.
The variables data0, data1, … are replaced by the according values from the input columns before.
In the next step the parameters are estimated in order to minimize the expressions in leastSquares
in the sense of least squares.
Afterwards complete rows are removed if one of the removalCriteria expressions for this row evaluates true (not zero).
An extra statistics file can be generated with one row of data. For the computation of the outColumn values
all dataVariables are available (e.g. data3mean, data4std)
inclusively the constants and estimated parameters but without the data0, data1, … itself.
The variables and the numbering of the columns refers to the outputfile.
First example: To calculate the mean of two values at each row set outColumn to 0.5*(data1+data0).
Second example: An input file contains a column with times and a column with values.
To remove a trend from the values define the parameters trend and bias.
The observation equation in leastSquares is data1 - (trend*data0+bias).
For output you can define the following columns for example:
outColumn=data0: points in time.
outColumn=data1: the values itself.
outColumn=trend*data0+bias: the linear fit.
outColumn=data1-trend*data0-bias: the residuals.
The extra statistics file could contain in this case:
outColumn=data0max-data0min: time span.
outColumn=bias: estimated parameter.
outColumn=trend: estimated parameter.
outColumn=data3rms: root mean square of the residuals.
The provided values at the area weighted grid points of the pattern of each gnssType are used as pseudo-observations.
A subset of patterns can be selected with types.
Produce a skyplot of antenna center variations
which can be plotted with PlotMap.
The first antenna from inputfileAntennaDefinition
matching the wildcard patterns of name, serial, radome is used.
For each antenna pattern (gnssType) a separate data column is computed.
A subset of patterns can be selected with types.
Azimuth and elevation are written as ellipsoidal longitude and latitude in a griddedData file.
The chosen ellipsoid parameters R and inverseFlattening are arbitrary but should be the same
as in grid and PlotMap.
Figure: Antenna Center Variations of ASH701945D\_M for two frequencies of GPS and GLONASS
Create a GNSS antenna definition file (Antenna Center Variations, ACV) consisting of multiple antennas.
The antennas can be created from scratch or can be selected from existing files.
This program can also be used to modify existing files.
Furthermore it can be used to create accuracy definition files containing azimuth and elevation dependent accuracy values for antennas.
To create an accuracy pattern for phase observations with 1 mm accuracy at zenith and no azimuth dependency, define a
pattern with type=L, values=0.001/cos(zenith/rho).
The antennas in outputfileAntennaDefinition
are sorted by names and duplicates are removed (first one is kept).
Apply constraints to normal equations
containing antennaCenterVariations.
Usually the antenna center variations are estimated together with other parameters
like station coordinates, signal biases and slant TEC in GnssProcessing.
This results in a rank deficient matrix as not all parameters can be separated.
The deficiency can be resolved by adding pseudo-observation equations as constraints.
To separate antenna center variations and signal biases
apply constraint:mean for each GNSS type.
The observation equation for the integral mean of antenna center variations (ACV)
in all azimuth $A$ and elevation $E$ dependent directions
\[
0 = \iint ACV(A,E)\, d\Phi \approx \sum_i ACV(A_i,E_i)\, \Delta\Phi_i
\]is approximated by a grid defined by
deltaAzimuth, deltaZenith, and maxZenith.
To separate from station coordinates use constraint:centerMean
and from slant TEC parameters use constraint:TEC.
The constraints are applied separately to all antennas matching
the wildcard patterns of name, serial, radome.
Creates attitude info file (Instrument(MISCVALUES))
used by SimulateStarCameraGnss. One or more attitudeInfos can be specified.
They are valid from timeStart until the start of the subsequent attitudeInfo.
maxManeuverTime is used by SimulateStarCameraGnss to look
for ongoing orbit maneuvers before/after the given orbit that might affect the attitude at
the beginning or end of a given orbit.
Figure: Overview of attitude modes used by GNSS satellites
Here is a list of GNSS satellite types for which the attitude behavior is known and their
respective attitude modes and required parameters:
GPS-II/IIA [1]
defaultMode: nominalYawSteering
midnightMode: shadowMaxYawSteeringAndRecovery
noonMode: catchUpYawSteering
maxYawRate: 0.12 deg/s
yawBias: 0.5 deg
maxManeuverTime: 2 h
GPS-IIR/IIR-M [1]
defaultMode: nominalYawSteering
midnightMode: catchUpYawSteering
noonMode: catchUpYawSteering
maxYawRate: 0.2 deg/s
maxManeuverTime: 30 min
GPS-IIF [2]
defaultMode: nominalYawSteering
midnightMode: shadowConstantYawSteering
noonMode: catchUpYawSteering
maxYawRate: 0.11 deg/s
yawBias: -0.7 deg
maxManeuverTime: 1.5 h
GLO-M [3]
defaultMode: nominalYawSteering
midnightMode: shadowMaxYawSteeringAndStop
noonMode: centeredMaxYawSteering
maxYawRate: 0.25 deg/s
noonBetaThreshold: 2 deg
maxManeuverTime: 1.5 h
GAL-1 [4]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering1
noonMode: smoothedYawSteering1
maxManeuverTime: 1.5 h
GAL-2 [4]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering2
noonMode: smoothedYawSteering2
midnightBetaThreshold: 4.1 deg
noonBetaThreshold: 4.1 deg
activationThreshold: 10 deg
maxManeuverTime: 5656 s
BDS-2G/3G [5, 6]
defaultMode: orbitNormal
midnightMode: orbitNormal
noonMode: orbitNormal
BDS-2I [5]
defaultMode: nominalYawSteering
midnightMode: betaDependentOrbitNormal
noonMode: betaDependentOrbitNormal
maxYawRate: 0.085 deg/s
midnightBetaThreshold: 4 deg
noonBetaThreshold: 4 deg
activationThreshold: 5 deg
maxManeuverTime: 24 h
BDS-2M [5]
defaultMode: nominalYawSteering
midnightMode: betaDependentOrbitNormal
noonMode: betaDependentOrbitNormal
maxYawRate: 0.159 deg/s
midnightBetaThreshold: 4 deg
noonBetaThreshold: 4 deg
activationThreshold: 5 deg
maxManeuverTime: 13 h
BDS-3I/3SI [6]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering2
noonMode: smoothedYawSteering2
midnightBetaThreshold: 3 deg
noonBetaThreshold: 3 deg
activationThreshold: 6 deg
maxManeuverTime: 5740 s
BDS-3M/3SM [6]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering2
noonMode: smoothedYawSteering2
midnightBetaThreshold: 3 deg
noonBetaThreshold: 3 deg
activationThreshold: 6 deg
maxManeuverTime: 3090 s
QZS-1 [7]
defaultMode: nominalYawSteering
midnightMode: betaDependentOrbitNormal
noonMode: betaDependentOrbitNormal
maxYawRate: 0.01 deg/s
yawBias: 180 deg
midnightBetaThreshold: 20 deg
noonBetaThreshold: 20 deg
activationThreshold: 18.5 deg
maxManeuverTime: 24 h
QZS-2G [7]
defaultMode: orbitNormal
midnightMode: orbitNormal
noonMode: orbitNormal
yawBias: 180 deg
QZS-2I [7]
defaultMode: nominalYawSteering
midnightMode: centeredMaxYawSteering
noonMode: centeredMaxYawSteering
maxYawRate: 0.055 deg/s
midnightBetaThreshold: 5 deg
noonBetaThreshold: 5 deg
maxManeuverTime: 1.5 h
Some specific satellites may deviate in their attitude behavior or parameters
(e.g. G013-G040, R713, C005, C015, C017, J001).
This program can be used to absolutely align GNSS transmitter clocks to reference clocks (i.e. broadcast clocks).
Each 'group' of transmitters, usually a system like GPS or Galileo, is aligned individually by a constant shift over all transmitters.
If alignClocksByFreqNo is set, GLONASS transmitters will be divided by frequency number into groups of nominally two transmitters.
The offset between clocks and reference clocks will be shifted into receiver code biases, if receiver is provided."
By setting alignFreqNoBiasesAtReceiver and providing receiver, this program can further align GLONASS transmitter signal
biases so that the differences between frequency number-dependent receiver signal biases are minimal, which helps if PPP users don't set
up individual signal biases per frequency number at the receiver. Alignment is done by computing signal bias residuals to the mean over all
frequency numbers of a signal type at each receiver and then computing the means over all receivers for each frequency number and shifting
those from the receiver signal biases to the transmitter signal biases. Internal consistency of the biases is not affected by this.
Converts GNSS clocks from GROOPS format to IGS clock RINEX format.
Clocks can be provided via satelliteData and/or stationData.
Observed signal types are inferred from inputfileSignalBias.
Satellites/stations used as clock references can be provided via referenceClock.
See IGS clock RINEX format description for further details on header information.
Name
Type
Annotation
outputfileClockRinex
filename
satelliteData
sequence
one element per satellite
inputfileClock
filename
clock instrument file
inputfileSignalBias
filename
signal bias file
identifier
string
PRN (e.g. G23)
stationData
sequence
one element per station
inputfileClock
filename
clock instrument file
inputfilePosition
filename
station position file
inputfileStationInfo
filename
station info file
identifier
string
station name (e.g. wtzz)
comment
string
comment in header
program
string
name of program (for first line)
institution
string
name of agency (for first line)
analysisCenter
string
name of analysis center
differentialCodeBias
string
program and source for applied differential code bias
phaseCenterVariations
string
program and source for applied phase center variations
This program converts clocks from the IGS clock RINEX format,
which contains the clocks of all satellites and stations in a single file,
into an instrument file (MISCVALUE) for each identifier
(satellite and/or station).
Requires polar motion, polar motion rate, dUT1 and LOD parameters in the solution
vector inputfileSolution and their sigmas in inputfileSigmax.
Solution usually comes out of GnssProcessing.
Name
Type
Annotation
outputfileIgsErp
filename
IGS ERP file
epoch
sequence
e.g. daily solution
inputfileSolution
filename
parameter vector
inputfileSigmax
filename
standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation)
inputfileParameterNames
filename
parameter names
inputfileTransmitterList
filename
transmitter PRNs used in solution (used for transmitter count)
inputfileStationList
filename
stations used in solution (used for station count)
This program estimates an epoch-wise clock shift in a constellation of GNSS satellites.
Each separate data column represents a satellite (e.g. one of 32 GPS satellites).
The shift to reference clocks can be estimated by providing inputfileInstrumentRef.
Clock shifts are estimated for each epoch given by timeSeries.
Converts GNSS satellite attitude from ORBEX file format
(quaternions) to instrument file (STARCAMERA).
The resulting star camera files contain the rotation from satellite body frame to TRF, or to CRF in case
earthRotation is provided.
This program processes GNSS observations. It calculates the linearized observation equations,
accumulates them into a system of normal equations and solves it.
The observation epochs are defined by timeSeries
and only observations at these epochs (within a timeMargin) are considered.
To calculate observation equations from the tracks, the model parameters or unknown parameters need to be
defined beforehand. These unknown parameters can be chosen arbitrarily by the user with an adequate list of defined
parametrization.
Some of the parametrization also include a priori models.
Lastly it is required to define the process flow of the gnssProcessing. This is accomplished
with a list of processingSteps.
Each step is processed consecutively. Some steps allow the selection of parameters, epochs,
or the normal equation structure, which affects all subsequent steps.
A minimal example consists of following steps:
estimate: iterative float solution with outlier downweighting
resolveAmbiguities:
fix ambiguities to integer and remove them from the normals
estimate: a few iterations for final outlier downweighting
Compute antenna accuracies from observation inputfileResiduals.
The inputfileStationInfo is needed to assign
the residuals to the equipped antenna at observation times.
The outputfileAccuracyDefinition contains
at first step the same accuracy information for all antennas as the input file.
Only the azimuth $A$ and elevation $E$ dependent grid points of the patterns
where enough residuals are available ($>$ minRedundancy)
are replaced by estimated accuracy
\[
\sigma(A,E) = \sqrt{\frac{\sum_i e_i^2(A,E)}{\sum_i r_i(A,E)}},
\]where $e_i$ are the azimuth and elevation dependent residuals and $r_i$ the
corresponding redundancies (number of observations minus the contribution to
the estimated parameters).
Example: Analysis of TerraSAR-X residuals of one month shows that low elevation
GPS satellites are not tracked by the onboard receiver. An estimation of accuracies
for these directions is not possible from the residuals and the apriori accuracies
are left untouched. The other directions show very low phase noise hardly elevation
and azimuth dependent for L2W. A nearly zero mean indicates the use of adequate antenna
center variations in the processing.
Figure: L2W accuracies of TerraSAR-X determined from residuals of one month
Write GNSS residuals together with azimuth and elevation to be plotted with PlotMap.
Azimuth and elevation are written as ellipsoidal longitude and latitude in a griddedData file.
The chosen ellipsoid parameters R and inverseFlattening are arbitrary but should be the same
as in PlotMap. If with typeTransmitter (e.g. '***G18')
a single transmitter is selected the azimuth and elevation are computed from the transmitter point of view.
For each GNSS type an extra data column is created.
A GNSS residual file includes additional information
besides the residuals, which can also be selected with type
A1*, E1*: azimuth and elevation at receiver
A2*, E2*: azimuth and elevation at transmitter
I**: Estimated slant total electron content (STEC)
Furthermore these files may include for each residual type
information about the redundancy and the accuracy relation $\sigma/\sigma_0$
of the estimated $\sigma$ versus the apriori $\sigma_0$ from the least squares adjustment.
The 3 values (residuals, redundancy, $\sigma/\sigma_0$) are coded with the same type.
To get access to all values the corresponding type must be repeated in type.
Figure: GPS C2W residuals of GRAZ station at 2012-01-01
Compute antenna accuracies from observation inputfileResiduals.
The inputfileTransmitterInfo is needed to assign
the residuals to the equipped antenna at observation times.
The outputfileAccuracyDefinition contains
at first step the same accuracy information for all antennas as the input file.
Only the azimuth $A$ and elevation $E$ dependent grid points of the patterns
where enough residuals are available ($>$ minRedundancy)
are replaced by estimated accuracy
\[
\sigma(A,E) = \sqrt{\frac{\sum_i e_i^2(A,E)}{\sum_i r_i(A,E)}},
\]where $e_i$ are the azimuth and elevation dependent residuals and $r_i$ the
corresponding redundancies (number of observations minus the contribution to
the estimated parameters).
Evaluates orbit and clock parameters from RINEX (version 2, 3, and 4)
navigation file inputfileRinex at epochs given by timeSeries and writes them to
outputfileOrbit and outputfileClock, respectively.
Orbits are rotated from TRF (as broadcasted) to CRF via earthRotation,
but system-specific TRFs (WGS84, PZ-90, etc.) are not aligned to a common TRF.
Furthermore, an option is available to remove any satellite ephemeris data that has its satellite flag set to unhealthy.
Computes signal biases for a given list of types.
If the type list is empty, all types contained in inputfileSignalBias are used.
The resulting outputfileMatrix contains a vector with an entry for each type.
Convert GNSS signal biases from GROOPS format to IGS SINEX Bias format.
Biases can be provided via transmitterBiases and/or receiverBiases.
Phase biases without attribute (e.g. L1*) are automatically expanded so each code
bias has a corresponding phase bias
(Example: C1C, C1W, L1* are converted to C1C, C1W, L1C, L1W).
Time-variable biases (e.g. GPS L5 satellite phase bias) can be provided via timeVariableBias.
Their time span will be based on the provided epochs ($t \pm \Delta t / 2$).
The slope of the bias can be optionally provided in the second data column.
If GLONASS receiver biases depend on frequency number, those must be defined in inputfileTransmitterInfo
to get the correct PRN/SVN assignment to the biases.
See IGS SINEX Bias format description for further details on header information.
This program simulates observations from receivers to GNSS satellites.
These simulated observations can then be used in GnssProcessing, for example to conduct closed-loop simulations.
One or more GNSS constellations must be defined via transmitter.
Receivers such as ground station networks or Low Earth Orbit (LEO) satellites can be defined via receiver.
A list of simulated observation types can be defined via observationType. Noise can be added to both observations and clock errors
via noiseObervation and noiseClockReceiver, respectively. Observation noise is
interpreted as a factor that is multiplied to the accuracy derived from the accuracy pattern of the respective observation type
(see inputfileAccuracyDefinition in receiver).
The parametrization are used to simulate a priori models (e.g. troposphere, signal biases).
Parameter settings and outputfiles are ignored.
If the program is run on multiple processes the receivers
(stations or LEO satellites) are distributed over the processes.
Only satellite observable-specific signal biases (OSB) are supported at the moment.
If multiple entries exist for the same bias, the weighted average (based on time span) of all entries is used.
Time-variable biases are not supported at the moment.
This program converts potential coefficients from the GRACE SDS format
into potential coefficients file.
The program supports file formats for RL04 to RL06.
Within the program, the variables epochStart, epochEnd and epochMid
are populated with the corresponding time-stamps in the file.
These can be used in outputfilePotentialCoefficients
to auto-generate the file name.
This program computes covariance information for the non-stationary noise of the KBR antenna offset correction (AOC)
from the orientation covariance matrices provided in Level-1B products via variance propagation.
By using the output outputfileSatelliteTrackingCovariance in PreprocessingSst,
the noise model distinguishes between the stationary noise of ranging observations and the non-stationary AOC noise.
This program converts the atmospheric and ocean de-aliasing product (AOD1B)
from the GRACE SDS format into time spline files.
Multiple inputfiles must be given in the correct time order.
A linear method is assumed for the interpolation between the given points in time.
This program converts orientation data measured by the star cameras
from the GRACE Level-1A format (SCA1A) to the GROOPS instrument file format.
For further information see GraceL1a2Accelerometer.
This program converts Level-1A temperature measurements (HRT1B or HRT1A) to the GROOPS instrument file format.
The GRACE Level-1A format is described in the document given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz.
Multiple inputfiles must be given in the correct time order.
The output is one arc of satellite data which can include data gaps.
To split the arc in multiple gap free arcs use InstrumentSynchronize.
Multiple inputfiles must be given in the correct time order.
The output is one arc of satellite data which can include data gaps.
To split the arc in multiple gap free arcs use InstrumentSynchronize.
This program converts the reduced dynamical orbit
from the GRACE/GRACE-FO SDS format (GNV1B, GNI1B) into instrument file (ORBIT).
When GNV1B is used, the orbit can be rotated from the terrestrial reference frame (TRF) transformed into the celestial reference frame (CRF) by
specifying earthRotation.
This program converts low-low satellite data measured by the K-band ranging system
from the GRACE SDS format (KBR1B or LRI1B) into instrument file (SATELLITETRACKING).
The inputfiles contain also corrections to antenna offsets
and the so called light time correction. The corrections can be stored in additional files
in the same format as the observations.
If a phase break is found an artificial gap is created.
For further information see GraceL1b2Accelerometer.
This program computes star camera covariance matrices (instrument file, COVARIANCE3D)
for a GRACE satellite under consideration of the active camera heads and an a priori variance factor.
This program reads vector orientation data (positions of instruments in the satellite frame) from the GRACE SDS format
(VGB1B, VGN1B, VGO1B, VKB1B, or VCM1B).
The outputfileVector is a $(3n\times1)$ matrix containing $(x,y,z)$ for each record.
The GRACE SDS format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027"
given at http://podaac.jpl.nasa.gov/grace/documentation.html.
This program computes the time shift between two co-orbiting satellites based on dynamic orbit data.
When applied to data of the first satellite, the computed time shift virtually shifts data of first satellite into the location of the second satellite.
Note that inputfileOrbit1 and inputfileOrbit2 need velocity and acceleration data, which
can be computed with OrbitAddVelocityAndAcceleration.
The program tries to find a minimum of the objective function
\[
f(\Delta t) = \| r_1(t) - r_2(t + \Delta t) \|^2,
\]by applying Newton's method to the first derivative, thus iteratively computing
\[
\Delta t_{k+1} = \Delta t_k - \frac{f'(\Delta t_k)}{f''(\Delta t_k)}.
\]This iteration is stopped when the difference between two consecutive time shift values falls below threshold or
maximumIterations is reached. An initialGuess of the time shift can speed up convergence.
This program converts the GRACE SOE (sequence of events) file/format into instrument file (MISCVALUES).
The GRACE SOE format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" and "TN-03_SOE_format.txt"
given at http://podaac.jpl.nasa.gov/grace/documentation.html.
The output is one arc of satellite data which can include data gaps.
Name
Type
Annotation
outputfileGraceA
filename
outputfileGraceB
filename
inputfile
filename
SoE file
events
choice
ACCT
sequence
DSHL HeaterDisconnect
mode
choice
Heater
DSHL HeaterDisconnect
SetPoint
temperature set point
AOCS
sequence
coarse pointing mode or attitude hold mode
mode
choice
CPM
coarse pointing mode
AHM
attitude hold mode
SM
science mode
ACCR
ACCR
CMCAL
sequence
CoM calibration maneuver
sampling
double
[seconds] create events between start and end of maneuver
KBRCAL
sequence
KBR calibration maneuver
sampling
double
[seconds] create events between start and end of maneuver
This program applies the Multi-Resolution Analysis (MRA) using
Discrete Wavelet Transform (DWT) to the monthly GRACE SST post-fit residuals.
First, the residuals are transferred into wavelet domain by applying an 8 level
Daubechies wavelet transform (default).
In the next step, detail coefficients are merged into three major groups
due to their approximate frequency subbands:
Low scale details, corresponding to the frequency band above 10 mHz;
Intermediate scale details, corresponding to the approximate frequency
range above 3 mHz up to 10 mHz;
High scale details, corresponding to the approximate frequency range
above 0.5 mHz up to 3 mHz.
In the last step, each group is reconstructed back into time domain.
This program estimates satellite-to-satellite-tracking (SST) deterministic signals
due to eclipse transits and low-SNR values from post-fit residuals.
The low-SNR effects are estimated by directly using the residual values.
The ensemble averaging method is used to characterize the average properties of eclipse transit signal shapes across all transit events.
Each shape is assigned to one arc of 3 hours (default). This can be modified by enabling averagingInterval.
Name
Type
Annotation
inputfileGraceResiduals
filename
SST Residuals
timeMargin
uint
epochs before instrumental events
waveLength
uint
length of the sample wave
estimateEclipseTransitScale
sequence
outputfileScaleModel
filename
inputfileGrace1EclipseFactor
filename
GRACE-A eclipse factors computed with integrated orbit
inputfileGrace2EclipseFactor
filename
GRACE-B eclipse factors computed with integrated orbit
Time-indexing deterministic signals in the GRACE K-Band measurements caused by Sun intrusions
into the star camera baffles of GRACE-A and eclipse transits of the satellites.
The events are determined by satellites' position (inputfileOrbit1/2)
and orientation (inputfileStarCamera1/2). Each type of event is represented
by its mid-interval point per orbit revolution and is reported in outputfileEvents.
The waveform of each event is nearly constant within one month and can be approximated by a polynomial.
For the purpose of gravity field recovery, each waveform is parameterized by a polynomial and the coefficients
of this polynomial are estimated as additional instrument calibration parameters in a common adjustment
with all other instrument, satellite, and gravity field parameters,
see parametrizationSatelliteTracking:specialEffect.
This program converts low-low satellite data measured by the K-band ranging system
from the GRAIL format into instrument file (SATELLITETRACKING).
The inputfiles contain also corrections for antenna offsets
and the so called light time correction.
The corrections can be stored in additional files in the same format as the observations.
If a phase break is found an artificial gap is created.
This program computes the absolute value of gravity $\left\lVert{\M g}\right\rVert$
of a gravityfield on a given grid.
The result is multiplied by factor.
To get the full gravity vector in a terrestrial frame add
the centrifugal part, see gravityfield:tides:centrifugal.
The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height)
based on a reference ellipsoid with parameters R and inverseFlattening.
It is intended to compute gravity anomalies from absolute gravity observations.
To visualize the results use PlotMap.
This program computes a time series of time variable
gravityfield functionals averaged over a given area,
e.g. equivalent water heights in the amazon basin. The type of functional
(e.g. gravity anomalies or geoid heights) can be chosen with kernel.
The average is performed at each time step by a weighted average over all grid
points where the weight is the associated area at each point. If removeMean is set
the temporal mean is removed from the time series. To speed up the computation
the gravity field can be converted to spherical harmonics before the computation
with convertToHarmonics.
Additionally the root mean square of the values in the area at each time step
is computed if computeRms is set.
Additionally the accuracy of the value at each time step can be computed if computeSigma is set.
The outputfileTimeSeries is an instrument file with one, two, or three data columns.
First data column contains the computed functionals and the following columns contain the RMS and the accuracies (optionally).
This program computes the deflections of the vertical $\xi$ in north direction
and $\eta$ in east direction in radian
according to
\[
\xi = g_x/\gamma \qquad\text{and}\qquad \eta=g_y/\gamma,
\]where $\M g=\nabla V$ is the gravity vector from gravityfield in
the local ellipsoidal system (north, east, up) and $\gamma$ is the normal gravity at that point.
The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height)
based on a reference ellipsoid with parameters R and inverseFlattening.
This program computes degree amplitudes from a gravityfield
and saves them to a matrix file with three columns: the degree, the degree amplitude, and the formal errors.
The coefficients can be converted to different functionals with kernel.
The gravity field can be evaluated at different altitudes by specifying evaluationRadius.
Polar regions can be excluded by setting polarGap.
If set the expansion is limited in the range between minDegree
and maxDegree inclusively.
The coefficients are related to the reference radius R
and the Earth gravitational constant GM.
This program computes a timeSeries
of a time variable gravityfield and saves it as degree amplitudes.
The expansion is limited in the range between minDegree and maxDegree inclusively
\[
\sigma_n = \frac{GM}{R}\left(\frac{R}{r}\right)^{n+1}k_n\sqrt{\sum_{m=0}^n c_{nm}^2+s_{nm}^2}.
\]
The outputfileTimeSeries is a matrix with
every row containing the time, degree, degree amplitude, and the formal error.
This program computes a time series of displacements of a list of stations (grid)
due to the effect of time variable loading masses. The displacement $\M u$ of a station is calculated according to
\[\label{eq:displacement}
\M u(\M r) = \frac{1}{\gamma}\sum_{n=0}^\infty \left[\frac{h_n}{1+k_n}V_n(\M r)\,\M e_{up}
+ R\frac{l_n}{1+k_n}\left(
\frac{\partial V_n(\M r)}{\partial \M e_{north}}\M e_{north}
+\frac{\partial V_n(\M r)}{\partial \M e_{east}} \M e_{east}\right)\right],
\]where $\gamma$ is the normal gravity, the load Love and Shida numbers $h_n,l_n$ are given by
inputfileDeformationLoadLoveNumber and the load Love numbers $k_n$ are given by
inputfilePotentialLoadLoveNumber. The $V_n$ are the spherical harmonics expansion of
the full time variable gravitational potential (potential of the loading mass + deformation potential):
\[
V(\M r) = \sum_{n=0}^\infty V_n(\M r).
\]Deformations due to Earth tide and due to polar tides are computed using the IERS conventions.
Eq. \eqref{eq:displacement} is not used in these cases.
The outputfileTimeSeries is an instrument file, MISCVALUES.
The data columns contain the deformation of each station in $x,y,z$ in a global terrestrial
reference frame or alternatively in a local ellipsoidal frame (north, east, up)
if localReferenceFrame is set.
This program estimates a spatial and temporal covariance matrix from
a time series of gravity fields.
Firstly for every time step $t_i$
a spherical harmonics vector $\M x_i$ from the time variable gravity field
is generated. The coefficients of the spherical harmonics expansion are
in the sequence given by numbering.
If set the expansion is limited in the range between minDegree
and maxDegree inclusively. The coefficients are related to the
reference radius R and the Earth gravitational constant GM.
In the next step the empirical covariance matrix is estimated according to
\[
\M\Sigma(\Delta i)_{full} = \frac{1}{N}\sum_{i=1}^N \M x_i \M x_{i+\Delta i}^T,
\]where $\Delta i$ is given by differenceStep.
From the diagonal elements of $\M\Sigma(\Delta i)$ the isotropic accuracies
are computed
\[
\sigma_n^2 = \frac{1}{2n+1}\sum_{m=0}^n \sigma_{cnm}^2+\sigma_{snm}^2,
\]and a diagonal matrix is constructed $\Sigma_{iso} = \text{diag}(\sigma_2^2,\ldots,\sigma_N^2)$.
The result is computed:
\[
\M\Sigma(\Delta i) = \alpha_{full}\M\Sigma(\Delta i)_{full}+\alpha_{iso}\M\Sigma(\Delta i)_{iso}.
\]
This program computes gravity gradients from gravityfield
on a grid in a global terrestrial reference frame
or alternatively in a local ellipsoidal frame (north, east, up) if localReferenceFrame is set.
In outputfileGriddedData the values $[Vxx, Vyy, Vzz, Vxy, Vxz, Vyz]$
will be saved together with points expressed as ellipsoidal coordinates
(longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.
This program computes gravity vectors from gravityfield
on a grid in a global terrestrial reference frame
or alternatively in a local ellipsoidal frame (north, east, up) if localReferenceFrame is set.
In outputfileGriddedData the values $[gx, gy, gz]$
will be saved together with points expressed as ellipsoidal coordinates
(longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.
This program propagates the covariance matrix of a gravityfield
evaluated at time to a grid. The full variance-covariance matrix is computed
and written to a matrix file:
\[
\mathbf{\Sigma}_\mathbf{y} = \mathbf{F}\mathbf{\Sigma}_\mathbf{x}\mathbf{F}^T
\]The kernel determines the quantity of the grid values, for example,
kernel:waterHeight.
This program computes values of a gravityfield on a given grid.
The type of value (e.g. gravity anomalies or geoid heights) can be chosen with kernel.
If a time is given the gravity field will be evaluated at this point of time otherwise only the static part will be used.
The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height)
based on a reference ellipsoid with parameters R and inverseFlattening.
To speed up the computation the gravity field can be converted to spherical harmonics before the computation
with convertToHarmonics.
This program computes values of a gravityfield on a given grid
for each time step of timeSeries.
The type of value (e.g. gravity anomalies or geoid heights) can be chosen with kernel.
To speed up the computation the gravity field can be converted to spherical harmonics before the computation
with convertToHarmonics.
The outputfileTimeSeries is an instrument (MISCVALUES) file with a data column
for each grid point per epoch.
This program evaluates a time variable gravityfield
at a given time and saves it as a spherical harmonics file.
If set the expansion is limited in the range between minDegree
and maxDegree inclusively.
The coefficients are related to the reference radius R
and the Earth gravitational constant GM.
This program computes a timeSeries
of a time variable gravityfield
and converts to coefficients of a spherical harmonics expansion.
The expansion is limited in the range between minDegree
and maxDegree inclusively.
The coefficients are related to the reference radius R
and the Earth gravitational constant GM.
The outputfileTimeSeries contains the potential coefficients
as data columns for each epoch in the sequence given by
numbering.
This program evaluates a time variable gravityfield at a given time
and saves a vector with the coefficients of a spherical harmonics expansion in the sequence given by
numbering.
If set the expansion is limited in the range between minDegree and maxDegree inclusively.
The coefficients are related to the reference radius R and the Earth gravitational constant GM.
This program estimates splines in time domain from a time variable gravity field
and writes outputfileTimeSplines.
The gravityfield is sampled at sampling, converted to potential coefficients
in the range between minDegree and maxDegree inclusively.
The time series of spherical harmonics can be temporal filtered with temporalFilter.
In the next step temporal splines with splineDegree and nodal points given
at splineTimeSeries are adjusted to the time series in a least squares sense.
This is very fast for block means (splineDegree = 0) but for other splines a large system of equations
must be solved. In the adjustment process the time series of gravity fields can be interpreted as samples
at the given times or as continuous function with linear behaviour between sampled points (linearInterpolation).
This program estimates parametrizationTemporal
(e.g. mean, trend, annual) from a time variable gravity field.
In a first step a time variable gravityfield
is sampled at timeSeries
and converted to coefficients of a spherical harmonics expansion.
The expansion is limited in the range between minDegree
and maxDegree inclusively.
The coefficients are related to the reference radius R
and the Earth gravitational constant GM.
This program computes the covariance between a source point given
by longitude/latitude (L, B) and the points of a grid
in terms of the functional given by kernel from the variance-covariance
matrix of a gravityfield evaluated at time.
If computeCorrelation is set, the program returns the correlation according to
\[
r_{ij} = \frac{\sigma_{ij}}{\sigma_i \sigma_j}
\]in the range of [-1, 1] instead of the covariance.
This program propagates variance-covariance matrix of a gravityfield
evaluated at time to the points of a grid in terms of the functional
given by kernel.
The resulting outputfileGriddedData contains the standard deviations of the grid
points.
This program computes a time series of area mean values
in a basin represented by border from a sequence of grid files.
If a file is not found, the epoch is skipped. By default
the weighted average of all points in the given border is computed where the points are weighted by their area element.
If computeMean is set, the time average of each grid points is subtracted before the computation.
If multiplyWithArea is set, the weighted average is multiplied with the total basin area.
This is useful for computing the total mass in the basin.
The outputfileTimeSeries is an instrument file, where the first columns are the
mean value of each data column in the grid files, followed by the weighted RMS
for each data column in the grid files if computeRms is set.
If the number of data columns differs between the grid files, the remaining columns are padded with zeros.
This program assigns values inputfileGriddedData to the nearest points
of a new grid. If some of the new points are not filled in with data
emptyValue is used instead. If multiple points of the input fall on the same node
the result can be selected with statistics (e.g. mean, root mean square, min, max, … ).
It also is possible to simply count the number of data points that were assigned to each point.
Write a series of inputfileGriddedData
with the corresponding timeSeries
as a single gridded data time series file.
The splineDegree defines the possible temporal interpolation of data in the output file.
For a file with spline degree 0 (temporal block means) the time intervals
in which the grids are valid are defined between adjacent points in time.
Therefore one more point in time is needed than the number of input grid files for degree 0.
This program converts inputfileGriddedData
to outputfileMatrix with data columns.
The grid is expressed as ellipsoidal coordinates (longitude, latitude, height)
based on a reference ellipsoid with parameters R and inverseFlattening.
The content of the output matrix can be controlled by outColumn expressions
applied to every grid point. The common data variables for grids are available,
see dataVariables.
Name
Type
Annotation
outputfileMatrix
filename
point list as matrix with longitude and latitude values in columns and possible additional columns
inputfileGriddedData
filename
R
double
reference radius for ellipsoidal coordinates on output
inverseFlattening
double
reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates
This program converts a inputfileGriddedData
to a COARDS compliant NetCDF file. The output data can be defined with dataVariable.
You should add at least the attributes units, long_name, and maybe _FillValue
to the variables. For the dataVariable:value the standard dataVariables
are available to select the data columns of inputfileGriddedData.
This program estimates potential coefficients from inputfileGriddedData
gravity field functionals. It uses a simple quadrature formula
\[
c_{nm} = \frac{1}{4\pi}\frac{R}{GM} \sum_i f_i \left(\frac{r_i}{R}\right)^{n+1} k_n C_{nm}(\lambda_i,\vartheta_i)\,\Delta\Phi_i
\]or a leastSquares adjustment with block diagonal normal matrix (order by order).
For the latter one the data must be regularly distributed.
The values $f_i$ and the weights $\Delta\Phi_i$ are expressions
using the common data variables for grids, see dataVariables.
Multiple outputfilePotentialCoefficients can be estimated in one step.
For each an individual value must be specified.
The type of the gridded data (e.g. gravity anomalies or geoid heights)
must be set with kernel $k_n$.
The expansion is limited in the range between minDegree
and maxDegree inclusively. The coefficients are related
to the reference radius R and the Earth gravitational constant GM.
Compute the spherical distance on the unit sphere in radians between all point pairs of two grids.
The spherical distance is computed by
\[
\psi_{12} = \arccos(\M n_1 \cdot \M n_2),
\]where $\M n_i$ is the (normalized) position. This implies that all points are projected onto the unit sphere.
Name
Type
Annotation
outputfileMatrix
filename
matrix containing the spherical distance between all point pairs [rad]
If groupDataByPoints is true the outputfileTimeSeries starts
for each epoch with all data (data0, data1… ) for the first point,
followed by all data of the second point and so on.
If groupDataByPoints is false, the file starts with data0
for all points, followed by all data1 and so on.
This enables the use of all instrument programs like InstrumentFilter or
InstrumentDetrend to analyze and manipulate time series of gridded data.
This program manipulates grid files with data in columns similar to
FunctionsCalculate, see there for more details.
If several inputfiles are given the data columns are copied side by side.
All inputfiles must contain the same grid points.
The columns are enumerated by data0, data1, … .
The content of outputfileGriddedData is controlled by outColumn.
The algorithm to compute the output is as follows:
The expressions in outColumn are evaluated once for each grid point of the input.
The variables data0, data1, … are replaced by the according values
from the input columns before.
Additional variables are available, e.g. index, data0rms,
see dataVariables.
For a simplified handling constants can be defined by name=value.
It is also possible to estimate parameters in a least squares adjustment.
The leastSquares serves as template for observation equations for every point.
The expression leastSquares is evaluated for each grid point.
The variables data0, data1, … are replaced by the according values from the input columns before.
In the next step the parameters are estimated in order to minimize the expressions in leastSquares
in the sense of least squares.
Afterwards grid points are removed if one of the removalCriteria expressions
for this grid point evaluates true (not zero).
An extra statistics:outputfile can be generated with one row of data.
For the computation of the outColumn values
all dataVariables are available
(e.g. data3mean, data4std) inclusively the constants and
estimated parameters but without the data0, data1, … itself.
The variables and the numbering of the columns refers to the outputfileGriddedData.
This program concatenates the grids from several inputfileGriddedData
and writes them to a new outputfileGriddedData.
Input files must have the same number of data columns.
If sort is enabled, the points are sorted by latitudes starting from north/west to south/east.
Identical points (within a margin) can be removed with removeDuplicates.
This program creates a grid and writes it to outputfileGrid.
The grid is expressed as ellipsoidal coordinates (longitude, latitude, height)
based on a reference ellipsoid with parameters R and inverseFlattening.
Extra value columns can be appended using expressions
with the common data variables for gridded data.
Interpolate values of a regular rectangular inputfileGriddedData
to new points given by grid and write as outputfileGriddedData.
Only longitude and latitude of points are considered; the height is ignored for interpolation.
(Only nearest neighbor method is implemented at the moment.)
Figure: Interpolation of point data from rectangular gridded data.
Generate coarse grid by computing area weighted mean values.
The number of points is decimated by averaging integer multiples of grid points
(multiplierLongitude, multiplierLatitude).
If volumeConserving is set, data are interpreted as heights above ellipsoid
and the tesseroid volume
\[
V=\int_r^{r+H}\int_{\varphi_1}^{\varphi_2}\int_{\lambda_1}^{\lambda_2} r^2\cos\varphi\,d\varphi\,d\lambda\,dr
\]is conserved, where $r$ is the radius of the ellipsoid at grid center and
$(\varphi_1-\varphi_2)\times(\lambda_1-\lambda_2)$ are the grid cell boundaries.
This is meaningful for Digital Elevation Models (DEM).
The fine grid can be written, where the first coarse grid values (data0) are additionally appended.
The output data can be defined with dataVariable.
You should add at least the attributes units, long_name, and maybe _FillValue
to the variables. The dataVariable:inputColumn selects the data from the input file.
If timeSeries is not set
the temporal nodal points from the inputfile are used.
Estimate interior and exterior potential coefficients for atmosphere above digital terrain models.
Coefficients for interior $(1/r)^{n+1}$ and exterior ($r^n$) are computed.
The density of the atmosphere is assumed to be (Sjöberg, 1998)
\[
\rho_0\left(\frac{R}{R+h}\right)^\nu,
\]where $R$ is the radial distance of the ellipsoid at each point, $h$ the radial height above the ellipsoid,
$\rho_0$ is densitySeaLevel and nu $\nu$ is a constant factor. The density is integrated
from radialLowerBound and upperAtmosphericBoundary above the ellipsoid.
The radialLowerBound is typically the topography and can be computed as expression at every point
from inputfileGriddedData.
Name
Type
Annotation
outputfilePotentialCoefficientsExterior
filename
outputfilePotentialCoefficientsInterior
filename
inputfileGriddedData
filename
Digital Terrain Model
densitySeaLevel
double
[kg/m**3]
ny
double
Constant for Atmosphere
radialLowerBound
expression
expression (variables 'L', 'B', 'height', 'data', and 'area' are taken from the gridded data
upperAtmosphericBoundary
double
constant upper bound [m]
factor
double
the result is multiplied by this factor, set -1 to subtract the field
Runs programs in a group, which can be used to structure a config file.
If catchErrors is enabled and an error occurs, the remaining programs
are skipped and execution continues with errorPrograms, in case any are defined.
Otherwise an exception is thrown.
The silently option disables the screen output of the programs.
With outputfileLog a log file is written for this group in addition to a global log file.
This might be helpful within LoopPrograms with parallel iterations.
This program creates potential coefficients from the defining constants
of a Geodetic Reference System (GRS). The potential coefficients exclude the centrifugal part.
The form of the reference ellipsoid is either determined by the dynamical form factor J2,
or the geometric inverseFlattening. One of those form parameters must be specified.
The default values create the GRS80.
Name
Type
Annotation
outputfilePotentialCoefficients
filename
maxDegree
uint
GM
double
Geocentric gravitational constant
R
double
reference radius
omega
double
Angular velocity of rotation
J2
double
Dynamical form factor
inverseFlattening
double
Geometric inverse flattening of reference ellipsoid (0: sphere, ignored when J2 is set)
static potential coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid; (icgem1.0) epochReference
outputfileTrendCoefficients
filename
trend potential coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid; (icgem1.0) epochReference
outputfileOscillationCosine
filename
oscillation cosine coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid, oscillationPeriod; (icgem1.0) epochReference, oscillationPeriod
outputfileOscillationSine
filename
oscillation sine coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid, oscillationPeriod; (icgem1.0) epochReference, oscillationPeriod
outputfileIntervals
filename
two column ASCII file with all intervals found (only sensible for icgem2.0). The base name will be extended with .static, .trend, .annualCos, and .annualSin.
inputfileIcgem
filename
ICGEM GFC file
useFormalErrors
boolean
use formal errors if both formal and calibrated errors are given
This program computes the overlapping Allan variance from an
inputfileInstrument.
The estimate is averaged over all arcs (arcs are assumed to contain no data gaps).
The overlapping Allan variance is defined as
\[
\sigma^2(m\tau_0) = \frac{1}{2(m\tau_0)^2(N-2m)} \sum_{n=1}^{N-2m}(x_{n+2m}-2x_{n+m}+x_n)^2,
\]where $m\tau_0$ is the averaging interval defined by the median sampling $\tau_0$.
Name
Type
Annotation
outputfileAllanVariance
filename
column 0: averaging interval [seconds], column 1-(n-1): Allan variance for each data column
This estimates a covariance function of inputfileInstrument
for all selected columns with startDataFields and countDataFields.
The estimation is performed robustly via variance component estimation.
Bad arcs are downweighted and the accuracies can be written with outputfileSigmasPerArc.
The length of the covariance functions are determined by the longest arc.
Additionally the data can be detrended with parameter
and parameterPerArc.
This program computes the cross correlation between all corresponding data columns
in two instrument files. The instrument files must be synchronized (InstrumentSynchronize).
The outputfileCorrelation is a matrix with the first column containing the time lag followed by
cross-correlation function for each data column. The maximum lag is defined by the maximum arc length.
The correlation is based on the unbiased estimate of the cross-covariance between data columns $x$ and $y$,
\[
\sigma_{xy}(h) = \frac{1}{N}\sum_{k=1}^{N-h} x_{k+h} y_k,
\]which is averaged over all arcs. From this estimate, the correlation for each lag is then computed via
\[
r_{xy}(h) = \frac{\sigma_{xy}(h)}{\sigma_x(0)\sigma_y(0)},
\]which is the ratio between the biased estimates of the cross-covariance at lag $h$ and the auto-covariance of the individual data columns.
For instrument with data gaps, lag bins without any data are set to NAN.
This program computes the arc-wise histogram from an instrument file.
The output is a matrix with the first column containing the lower bound of each bin.
The other columns contain the histograms for each arc.
Figure: GRACE range-rate residuals of one month (one arc) divided into 50 bins.
Name
Type
Annotation
outputfileMatrix
filename
column 1: lower bin bound; columns 2 to N: histogram of each arc
inputfileInstrument
filename
selectDataField
uint
select channel for histogram computation
binCount
uint
(default: Freedman-Diaconis' choice, maximum of all channels)
lowerBound
expression
lower bound for bins (default: global minimum, data values outside are ignored)
upperBound
expression
upper bound for bins (default: global maximum, data values outside are ignored)
This program computes the power spectral density (PSD) for all data fields in an instrument file.
The PSD is computed using Lomb's method. For each arc and each frequency $f$, a sinusoid is fit to the data
\[
l_i = a \cos(2\pi f t_i) + b \sin(2\pi f t_i) + e_i
\]
The PSD for this frequency is then computed by forming the square sum of adjusted observations:
\[
P(f) = \sum_i \hat{l}^2_i.
\]
The resulting PSD is the average over all arcs. For regularly sampled time series,
this method yields the same results as FFT based PSD estimates.
A regular frequency grid based on the longest arc and the median sampling is computed.
The maximum number of epochs per arc is determined by
\[
N = \frac{t_{\text{end}} - t_{\text{start}}}{\Delta t_{\text{median}} } + 1,
\]the Nyquist frequency is given by
\[
f_{\text{nyq}} = \frac{1}{2\Delta t_{\text{median}}}.
\]
If it is suspected that inputfileInstrument contains secular variations,
the input should be detrended using InstrumentDetrend.
Each separate inputfileInstrument represents an entry (e.g. a satellite or station)
in the resulting grid. Therefore, providing, for example, 32 orbit files of GPS satellites
results in a grid with columns: mjd, id (0-31), rms.
The first three data columns of the instrument data are considered for computation of the RMS values.
The factor can be set to, for example, sqrt(3) to get 3D instead of 1D RMS values.
If timeIntervals are provided, each inputfileInstrument
and inputfileInstrumentReference serves as a template with variable loopTime.
This allows concatenation of instrument files, for example to create a month-long RMS plot grid from daily GPS
orbit files (see below).
Helmert parameters between the two frames can be estimated each epoch optionally if
estimateShift, estimateScale, or estimateRotation are set.
It uses a robust least squares adjustment.
Figure: Comparison of estimated GPS orbits with IGS final solution.
Name
Type
Annotation
outputfileRmsPlotGrid
filename
columns: mjd, id, rms
outputfileHelmertTimeSeries
filename
columns: mjd, tx, ty, tz, scale, rx, ry, rz
inputfileInstrument
filename
one file per satellite/station
inputfileInstrumentReference
filename
one file per satellite/station, same order as above
This program computes the wavelet transform of a time series up to a maxLevel.
The scalogram is written to a matrix which can be plotted by using a gridded layer in PlotGraph.
Individual detail levels can be written to matrix files by setting outputfileLevels.
The data column to be decomposed must be set by selectDataField.
The wavelet transform is implemented as a filter bank, so care should be taken when the input contains data gaps.
Low/highpass wavelet filters are applied in forward and backward direction, input is padded symmetrically.
See digitalFilter for details.
This program computes the spectral coherence between two instrument files.
The (magnitude-squared) coherence is defined as
\[
C_{xy}(f) = \frac{|P_{xy}(f)|^2}{P_{xx}(f)P_{yy}(f)}
\]and is a measure in the range [0, 1] for the similarity of the signals $x$ and $y$ in frequency domain.
$P_{xy}$ is the cross-spectral density between $x$ and $y$ and $P_{xx}$, $P_{yy}$ are auto-spectral densities.
Auto- and cross-spectral densities are computed using Lomb's method (see Instrument2PowerSpectralDensity for details).
The resulting PSD is the average over all arcs. For regularly sampled time series,
this method yields the same results as FFT based PSD estimates.
A regular frequency grid based on the longest arc and the median sampling is computed.
The maximum number of epochs per arc is determined by
\[
N = \frac{t_{\text{end}} - t_{\text{start}}}{\Delta t_{\text{median}} } + 1,
\]the Nyquist frequency is given by
\[
f_{\text{nyq}} = \frac{1}{2\Delta t_{\text{median}}}.
\]
If it is suspected that inputfileInstrument contains secular variations,
the input should be detrended using InstrumentDetrend.
The outputfileCoherence contains a matrix with the frequency vector as first column,
the coherence for each instrument channel is saved in the following columns.
This program applies the Short Time Fourier Transform (STFT) to selected data columns
of inputfileInstrument and computes the spectrogram.
The STFT is computed at centered timeSeries with
an (possible overlapping) rectangular window with windowLength seconds.
Data gaps are zero padded within the window.
The outputfileSpectrogram is a matrix with each row the time (MJD),
the frequency $[Hz]$, and the amplitudes $[unit/\sqrt{Hz}]$ for the selected data columns.
It can be plotted with PlotGraph.
Figure: GRACE range-rate residuals of one month (window of 6 hours).
This program estimates neutral mass densities along the satellite trajectory based on accelerometer data.
In order to determine the neutral mass density the accelerometer input should only reflect the accelerations due to drag
(e.g. miscAccelerations:atmosphericDrag).
Thus, influences from solar and Earth radiation pressure must be reduced beforehand.
Name
Type
Annotation
outputfileDensity
filename
MISCVALUE (kg/m^3)
satelliteModel
filename
satellite macro model
inputfileOrbit
filename
inputfileStarCamera
filename
inputfileAccelerometer
filename
add non-gravitational forces in satellite reference frame
If inputfileThruster is given, the corresponding epochs
(within marginThruster) are not used for the parameter estimation,
but the accelerometer epochs are still calibrated afterwards.
An arbitrary instrument file is allowed here.
This program estimates calibration parameters for acceleration data given an optional reference acceleration.
Specifically, the program solves the equation
\[
\mathbf{a} - \mathbf{a}_\text{ref} = \mathbf{f}(\mathbf{x}) + \mathbf{e}
\]for the unknown parameters $\mathbf{x}$, where $\mathbf{a}$ is given in inputfileAccelerometer and
$\mathbf{a}_\text{ref}$ is given in inputfileAccelerometerReference.
The parametrization of $\mathbf{x}$ can be set via parametrizationAcceleration.
Optionally, the empirical covariance functions for the accelerations $\mathbf{a}$ can be estimated by enabling estimateCovarianceFunctions.
This program manipulates the data columns of every arc of an instrument file similar to
FunctionsCalculate, see there for more details.
If several inputfileInstruments are given the data columns are copied side by side.
For this the instrument files must be synchronized (see InstrumentSynchronize). For the data
columns the standard data variables are available, see dataVariables.
For the time column (MJD) a variable epoch (together with epochmean, epochmin, … )
is defined additionally.
The content of outputfileInstrument is controlled by outColumn.
The number of outColumn must agree with the selected outType.
The algorithm to compute the output is as follows:
The expressions in outColumn are evaluated once for each epoch of the input.
The variables data0, data1, … are replaced by the according values from the input columns before.
If no outColumn are specified all input columns are used instead directly.
The instrument type can be specified with outType and must agree with the number of columns.
An extra statistics file can be generated with one mid epoch per arc. For the computation of the outColumn values
all dataVariables are available (e.g. epochmin, data0mean, data1std, … )
inclusively the constants and estimated parameters but without the data0, data1, … itself.
The variables and the numbering of the columns refers to the outputfileInstrument.
Computes statistics of selected data columns between two instrument files arc wise.
The outputfileStatisticsTimeSeries contains for every arc one (mid) epoch
with statistics column(s). Possible statistics are
Computes statistics of selected data columns of inputfileInstrument arc wise.
The outputfileStatisticsTimeSeries contains for every arc one (mid) epoch
with statistics column(s). Possible statistics are root mean square, standard deviation,
mean, median, min, and max.
With perColumn separate statistics for each selected data column are computed,
otherwise an overall value is computed.
This program concatenates the arcs from several instrument files
and writes them to a new file. Input files must be of the same type.
The arcs are merged to one arc even though there is a gap in between.
To split the data into arcs use InstrumentSynchronize.
Three options are available: sort, removeDuplicates and checkForNaNs.
If sort is enabled, the program reads all files, no matter if they are sorted correctly in time, and
then sorts the epochs. If removeDuplicates is enabled, the program checks the whole data set
for epochs that are contained twice. And if checkForNaNs is enabled the data set is checked for
invalid epochs containing NaNs.
Name
Type
Annotation
outputfile
filename
inputfile
filename
sort
boolean
sort epochs with increasing time
removeDuplicates
choice
remove duplicate epochs
keepFirst
sequence
keep first epoch with the same time stamp, remove all others
margin
double
margin for identical times [seconds]
keepLast
sequence
keep last epoch with the same time stamp, remove all others
margin
double
margin for identical times [seconds]
checkForNaNs
boolean
remove epochs with NaN values in one of the data fields
The outputfileTimeSeriesArcParameters contains for every arc one (mid) epoch
with the estimated parameters. The order is: first all data (data0, data1, … )
of first temporal parameter, followed by all data of the second temporal parameter and so on.
This program estimates the empirical auto- and cross-covariance of selected data columns per arc
of inputfileInstrument.
The maximum computed lag is determined by the number of outputfileCovarianceMatrix specified
(for a single output file only the auto-covariance is determined, for two output files auto- and cross-covariance is computed and so on).
Stationarity is assumed for the input time series, which means the temporal covariance matrix has Toeplitz structure.
\[
\begin{bmatrix}
\Sigma & \Sigma_{\Delta_1} & \Sigma_{\Delta_2} & \Sigma_{\Delta_3} & \Sigma_{\Delta_4} \\
& \Sigma & \Sigma_{\Delta_1} & \Sigma_{\Delta_2} & \Sigma_{\Delta_3} \\
& & \Sigma & \Sigma_{\Delta_1} & \Sigma_{\Delta_2} \\
& & & \Sigma & \Sigma_{\Delta_1} \\
& & & & \Sigma \\
\end{bmatrix}
\]
The matrix for lag $h$ describes the covariance between $x_{t-h}$ and $x_{t}$, i.e. $\Sigma(t-h, t)$.
To get a reliable estimate, InstrumentDetrend should be called first.
This program estimates a 3D Helmert transformation between two networks
(frame realizations, e.g. GNSS satellite or station network).
Each separate data represents a satellite/station/… (e.g. 32 GPS satellites).
The instrument data (x,y,z position) considered can be set with startData.
The Helmert parameters are set up according to parametrizationTemporal
for each timeIntervals and are estimated using a
robust least squares adjustment.
Name
Type
Annotation
outputfileHelmertTimeSeries
filename
columns: mjd, Tx,Ty,Tz,s,Rx,Ry,Rz according to temporal parametrization
Convert selected GNSS observations or residuals into a simpler time series format.
The outputfileTimeSeries is an instrument file (MISCVALUES).
For each epoch the first data column contains the PRN, the second the satellite system,
followed by a column for each GNSS type.
As normally more than one GNSS transmitter is tracked per epoch, the output file
has several lines per observed epoch (epochs with the same time, one for each transmitter).
The second data column of the output contains a number representing the system
71: 'G', GPS
82: 'R', GLONASS
69: 'E', GALILEO
67: 'C', BDS
83: 'S', SBAS
74: 'J', QZSS
73: 'I', IRNSS .
A GNSS residual file includes additional information
besides the residuals, which can also be selected with type
A1*, E1*: azimuth and elevation at receiver
A2*, E2*: azimuth and elevation at transmitter
I**: Estimated slant total electron content (STEC)
Furthermore these files may include for each residual type
information about the redundancy and the accuracy relation $\sigma/\sigma_0$
of the estimated $\sigma$ versus the apriori $\sigma_0$ from the least squares adjustment.
The three values (residuals, redundancy, $\sigma/\sigma_0$) are coded with the same type.
To get access to all values the corresponding type must be repeated in type.
Example: Selected GPS phase residuals (type='L1*G' and type='L2*G').
Plotted with PlotGraph with two layer:linesAndPoints
(valueX='data0', valueY='100*data3+data1' and valueY='100*data4+data1' respectively).
Figure: GPS residuals in cm, shifted by PRN
Name
Type
Annotation
outputfileTimeSeries
filename
Instrument (MISCVALUES): prn, system, values for each type
This program multiplies instrument data with a factor and adds them together.
Afterwards the mean of each arc and data column can be removed with removeArcMean.
The instrument files must be synchronized (InstrumentSynchronize).
This program reduces the sampling of an instrument file. Only epochs with a time stamp
with a division by sampling without remainder are kept (inside margin).
This program removes epochs from inputfileInstrument
by evaluating a set of removalCriteria expressions. For the data
columns the standard data variables are available,
see dataVariables.
The instrument data can be reduced by data from inputfileInstrumentReference
prior to evaluation of the expressions.
To reduce the data by its median, use an expression like data1-data1mean.
To remove epochs that deviate by more than 3 sigma use abs(data1)>3*data1std
or abs(data0-data0median)>3*1.4826*data0mad.
All arcs in the input instrument file are concatenated, meaning expressions
like data1mean refer to the complete dataset. The removed epochs can be saved
in a separate outputfileInstrumentRemovedEpochs.
Name
Type
Annotation
outputfileInstrument
filename
all data is stored in one arc
outputfileInstrumentRemovedEpochs
filename
all data is stored in one arc
inputfileInstrument
filename
arcs are concatenated for processing
inputfileInstrumentReference
filename
if given, the reference data is reduced prior to the expressions being evaluated
removalCriteria
expression
epochs are removed if one criterion evaluates true. data0 is the first data field.
margin
double
remove data around identified epochs (on both sides) [seconds]
This program compares an instrument file with a
time series.
Epochs contained within the time series (including a defined margin)
are removed from the instrument file. The margin is added on
both sides of the epochs. The arcs of the instrument file are
concatenated to one arc. The removed epochs can be saved
in a separate instrument file.
This program removes epochs from an instrument file.
The epochs are defined by a thruster file
plus a defined margin before and after the thruster firings.
The arcs of the instrument file are concatenated to one arc.
The removed epochs can be saved in a separate instrument file.
This program can also be used to reduce the sampling of an instrument file,
but a better way to reduce the sampling of noisy data with regular sampling
is to use a low pass filter first with InstrumentFilter and then thin
out the data with InstrumentReduceSampling.
This program rotates instrument data into a new reference frame
(using inputfileStarCamera).
The rotation is usually done from satellite frame into inertial frame.
This program computes the correction due to offset of the antenna center relative to the center of mass.
The offsets $\M c_A$ and $\M c_B$ in inputfileAntennaCenters are given in the satellite
reference frame. These offsets are rotated into the inertial frame with $\M D_A$ and $\M D_B$ from
inputfileStarCamera and projected onto the line of sight (LOS)
\[
\rho_{AOC} = \M e_{AB}\cdot(\M D_A\,\M c_A - \M D_B\,\M c_B),
\]with the unit vector in line of sight direction
\[
\M e_{AB} = \frac{\M r_B - \M r_A}{\left\lVert{\M r_B - \M r_A}\right\rVert}.
\]The corrections for the range-rates and range-acceleration are computed by differentiating
an interpolation polynomial of degree interpolationDegree.
Name
Type
Annotation
outputfileSatelliteTracking
filename
corrections for range, range-rate, and range-accelerations
inputfileOrbit1
filename
inputfileOrbit2
filename
inputfileStarCamera1
filename
inputfileStarCamera2
filename
antennaCenters
choice
KBR antenna phase center
value
sequence
center1X
double
x-coordinate of antenna position in SRF [m] for GRACEA
center1Y
double
y-coordinate of antenna position in SRF [m] for GRACEA
center1Z
double
z-coordinate of antenna position in SRF [m] for GRACEA
center2X
double
x-coordinate of antenna position in SRF [m] for GRACEB
center2Y
double
y-coordinate of antenna position in SRF [m] for GRACEB
center2Z
double
z-coordinate of antenna position in SRF [m] for GRACEB
file
sequence
inputAntennaCenters
filename
interpolationDegree
uint
differentiation by polynomial approximation of degree n
This program derives from a time series of quaternions
a series of angular rates and angular accelerations.
The derivatives are computed by a polynomial interpolation
with interpolationDegree of the quaternions.
Name
Type
Annotation
outputfileAngularRate
filename
[rad/s], VECTOR3D
outputfileAngularAcc
filename
[rad/s**2], VECTOR3D
inputfileStarCamera
filename
interpolationDegree
uint
derivation by polynomial interpolation of degree n
This program estimates the satellites orientation from star camera data
inputfileStarCamera and angular accelerometer data
inputfileAngularAcc. The combination of both observation types
is achieved in a least square adjustment. The optimal weighting between the two different
observation groups is achieved by means of VCE in combination with a robust estimator.
The system of linearized observation equations within the sensor fusion approach can be formulated as:
\[
\begin{bmatrix}
\M l_{ACC1B}\\
\M l_{SCA1B}
\end{bmatrix}
=
\begin{bmatrix}
\M A_{ACC1B} & \M B_{ACC1B}\\
\M A_{SCA1B} & \M 0
\end{bmatrix}
\begin{bmatrix}
\M q\\
\M b
\end{bmatrix}
=
\begin{bmatrix}
\frac{\partial \dot{\boldsymbol{\omega}}}{\partial \M q} & \frac{\partial \dot{\boldsymbol{\omega}}}{\partial \M b}\\
\M I & \M 0
\end{bmatrix}
\begin{bmatrix}
\M q\\
\M b
\end{bmatrix}
\]with
\[\begin{split}
\M l_{ACC1B} &= \dot{\boldsymbol{\omega}}_{ACC1B} - \dot{\boldsymbol{\omega}}_{0}, \\
\M l_{SCA1B} &= \M q_{SCA1B} - \M q_{0}, \\
\M q_{Fusion} &= \M q + \M q_{0}.
\end{split}\]The reference values $\M q_{0}$ and $\dot{\boldsymbol{\omega}}_{0}$ are derived
from inputfileStarCameraReference. In the course of the estimation,
the accelerometer data is calibrated, by setting a bias factor $\M b$ with accBias.
Name
Type
Annotation
outputfileStarCamera
filename
combined quaternions
outputfileCovariance
filename
epoch-wise covariance matrix
outputfileCovarianceMatrix
filename
full arc-wise covariance matrix per arc. arc number is appended to filename
This program computes a time series of statistics for one or more instrument files.
Possible statistics are root mean square, standard deviation, mean, median, min, and max.
The columns of the output time series are defined either as one per inputfileInstrument
or, if perColumn is true, statistics are computed per column for each file.
Providing e.g. 32 orbit files of GPS satellites results in a time series matrix
with columns: mjd, statisticsG01, statisticsG02, ..., statisticsG32.
If intervals are provided, the input data is split into these intervals
and one statistic is computed per interval. Otherwise, overall statistics are computed.
The instrument data considered for computation of the component-wise statistics
can be set with startDataFields and countDataFields.
The factor can be set to e.g. sqrt(3) to get 3D instead of 1D RMS values.
This program reads several instrument files and synchronizes the data.
Every epoch with some missing data will be deleted so the remaining epochs
have data from every instrument.
In a second step the epochs are divided into arcs with maximal epochs
(or maxArcLen) without having a gap inside an arc.
A gap is defined by a time step of at least minGap seconds
between consecutive epochs or, if not set, 1.5 times the median sampling.
Arcs with an epoch count less than minArcLen will be rejected.
A specific region can be selected with border.
In this case one of the instrument data must be an orbit.
If timeIntervals is given the data are also divided into time bins.
The assignment of arcs to the bins can be saved in outputfileArcList.
This file can be used for the variational equation approach or KalmanBuildNormals.
Instrument files from irregularData are not synchronized but
divided into the same number of arcs within the same time intervals.
Data outside the defined arcs will be deleted.
Name
Type
Annotation
data
sequence
outputfileInstrument
filename
inputfileInstrument
filename
margin
double
margin for identical times [seconds]
minGap
double
minimal time to define a gap and to begin a new arc, 0: no dividing [seconds], if not set 1.5*median sampling is used
minArcLength
uint
minimal number of epochs of an arc
maxArcLength
uint
maximal number of epochs of an arc
arcType
choice
all arcs or only ascending or descending arcs are selected
This program performs a multilevel one-dimensional wavelet analysis on one selectDataField
data column of inputfileInstrument.
The outputfileInstrument contains the decomposed levels in time domain ${a_J,d_J,...,d_1}$
Name
Type
Annotation
outputfileInstrument
filename
MISCVALUES, decomposed levels in time domain a_J,d_J,...,d_1
This program sets up normal equations based on observation
for short-term gravity field variations.
It computes the normal equations based on the intervals $i \in \{1, ..., N\}$ given in the arcList.
It sets up the least squares adjustment
\[
\begin{bmatrix}
\mathbf{l}_1 \\
\mathbf{l}_2 \\
\vdots \\
\mathbf{l}_N \\
\end{bmatrix}
=
\begin{bmatrix}
\mathbf{A}_1 & & & \\
& \mathbf{A}_2 & &\\
& & \ddots & \\
& & & \mathbf{A}_N \\
\end{bmatrix}
\begin{bmatrix}
\mathbf{x}^{(1)} \\
\mathbf{x}^{(2)} \\
\vdots \\
\mathbf{x}^{(N)} \\
\end{bmatrix}
+
\begin{bmatrix}
\mathbf{e}_1 \\
\mathbf{e}_2 \\
\vdots \\
\mathbf{e}_N \\
\end{bmatrix},
\]and subsequently computes the normal equations $\mathbf{N}_i, \mathbf{n}_i$ for each interval.
If eliminateNonGravityParameters is true, all non-gravity parameters are eliminated before the normals
are written to outputfileNormalEquation.
For each time interval in arcList a single normal equation file is written.
The program computes time variable gravity fields using the Kalman filter approach of
Kurtenbach, E., Eicker, A., Mayer-Gürr, T., Holschneider, M., Hayn, M., Fuhrmann, M., and Kusche, J. (2012).
Improved daily GRACE gravity field solutions using a Kalman smoother. Journal of Geodynamics, 59–60, 39–48.
https://doi.org/10.1016/j.jog.2012.02.006.
The updated state $\mathbf{x}_t^+$ is determined by solving the least squares adjustment
\[
\mathbf{l}_t = \mathbf{A}_t \mathbf{x}_t + \mathbf{e}_t \hspace{25pt} \mathbf{e}_t \sim \mathcal{N}(0, \mathbf{R}_t)\\
\mathbf{B} \mathbf{x}^+_{t-1} = \mathbf{I} \mathbf{x}_t + \mathbf{v}_t\hspace{25pt} \mathbf{v} \sim \mathcal{N}(0,\mathbf{Q} + \mathbf{B} \mathbf{P}^+_{t-1}\mathbf{B}^T).
\]In normal equation form this can be written as
\[
\hat{\mathbf{x}}_t = \mathbf{x}^+_t = (\mathbf{N}_t + (\mathbf{P}^-_t)^{-1})^{-1}(\mathbf{n}_t + (\mathbf{P}^-_t)^{-1} \mathbf{x}^-_t),
\]where $\mathbf{x}_t^- = \mathbf{B} \mathbf{x}^+_{t-1}$ and $\mathbf{P}_t^{-} = \mathbf{Q} + \mathbf{B} \mathbf{P}^+_{t-1}\mathbf{B}^T$
are the predicted state and its covariance matrix.
The process dynamic $\mathbf{B}, \mathbf{Q}$ is represented as an autoregressive model,
and passed to the program through inputfileAutoregressiveModel.
The sequence of normal equations $\mathbf{N}_t, \mathbf{n}_t$ are given as list of inputfileNormalEquations,
which can be generated using loops.
In the same way, the matrix files for outputfileUpdatedState and inputfileUpdatedStateCovariance
can also be specified using loops.
Apply the Rauch-Tung-Striebel smoother to a gravity field time series computed by KalmanFilter.
This is the implementation of the approach presented in
Kurtenbach, E., Eicker, A., Mayer-Gürr, T., Holschneider, M., Hayn, M., Fuhrmann, M., and Kusche, J. (2012).
Improved daily GRACE gravity field solutions using a Kalman smoother. Journal of Geodynamics, 59–60, 39–48.
https://doi.org/10.1016/j.jog.2012.02.006.
This program estimates temporal gravity field variations with a constraint least squares adjustment.
Prior information is introduced by means of an autoregressiveModelSequence
which represents a stationary random process (see the autoregressive model description for details).
Create signal standard deviations of potential coefficients according Kaula's rule of thumb
\[
\sigma_n = \frac{f}{n^p},
\]with the degree $n$, the factor $f$, and the power $p$.
Create variances of spherical harmonics by convolving a kernel with white noise,
e.g. to display filter coefficients of a Gaussian filter.
The coefficients are written as formal errors of outputfilePotentialCoefficients.
Compute rotation (StarCamera file) from local level frame (ellipsoidal north, east, down)
to TRF for positions given in inputfileInstrument (first 3 data columns).
Name
Type
Annotation
outputfileStarCamera
filename
rotation matrix from local level frame (ellipsoidal north, east, down) to TRF
inputfileInstrument
filename
origin of local level frame
constantOriginPerArc
boolean
use constant origin for all epochs of an arc (median position)
R
double
reference radius for ellipsoidal coordinates
inverseFlattening
double
reference flattening for ellipsoidal coordinates, 0: spherical coordinates
If continueAfterError=yes and an error occurs, the remaining programs in the current iteration
are skipped and the loop continues with the next iteration. Otherwise an exception is thrown.
If this program is executed on multiple processing nodes, the iterations can be computed in parallel,
see parallelization. The first process serves as load balancer
and the other processes are assigned to iterations according to processCountPerIteration.
For example, running a loop containing three iterations on 13 processes with processCountPerIteration=4,
runs the three iterations in parallel, with each iteration being assigned four processes.
With parallelLog=yes all processes write output to screen and the log file.
As the output can be quite confusing in this case, running GroupPrograms with an extra outputfileLog
for each iteration (use the loop variables for the name of the log files) might be helpful.
This program reads a matrix file with data in columns
and converts it into gridded data.
The input columns are enumerated by data0, data1, … ,
see dataVariables.
This program creates a matrix from multiple matrices.
All matrices are summed up. The size of the resulting matrix is expanded to fit all matrices.
The class matrixGenerator allows complex matrix operations beforehand.
Computes the equilibrium ocean tide of the long periodic tideGeneratingPotential.
The spherical harmonics expansion up to maxDegree with GM and R
is estimated using a least squares adjustment.
The inputfileDensityGrid must be a global regular grid with the
vertically averaged seawater density over the ocean and zero over land.
Additionally the effects of the solid Earth tide are considered,
both the gravitational (Love numbers k20, k20plus)
and the geometrical (Love numbers h20,0, h20,2) effect.
This program converts a COARDS compliant NetCDF file into an
outputfileGriddedData.
If no specific input variableNameData are selected all suitable data are used.
If the NetCDF file contains a time axis (variableNameData) a specific epoch
can be selected with time. The nearest epoch in file is used.
This program converts a COARDS compliant NetCDF file into
outputfileGriddedDataTimeSeries.
If no specific input variableNameData are selected all suitable data are used.
The inverse of the normal matrix of inputfileNormalEquation
represents the covariance matrix of the estimated parameters. This program generates a noise vector with
\[
\M\Sigma(\M e) = \M N^{-1},
\]if generated input noise is standard white noise.
The noise vector is computed with
\[
\M e = \M W^{-T} \M z,
\]where $\M z$ is the generated noise and
$\M W$ is the cholesky upper triangle matrix of the normal matrix $\M N=\M W^T\M W$.
Name
Type
Annotation
outputfileNoise
filename
generated noise as matrix: parameterCount x sampleCount
This program adds noise to simulated satellite positions
and velocities generated by SimulateOrbit (along, cross, radial).
See noiseGenerator for details on noise options.
This program accumulates normal equations and writes the total combined system to
outputfileNormalequation.
The inputfileNormalEquations must have all the same size and the same block structure.
This program is the simplified and fast version of the more general program NormalsBuild.
For input normals with different parameters, see NormalsReorderAndAccumulate.
This program accumulates normalEquations and
writes the total combined system to outputfileNormalequation.
For a detailed description of the used algorithm see normalEquation.
Large normal equation systems can be divided into blocks with normalsBlockSize.
This program sets up normal equations based on observation.
Additionally short time and long time variations can be parametrized based on the static parameters
in observation in an efficient way. The observation equations
are divided into time intervals $i \in \{1, ..., N\}$ (e.g. daily) as defined in
inputfileArcList.
With estimateLongTimeVariations additional temporal variations can be co-estimated
for a subset of the parameters selected by parameterSelection.
These parameters might be spherical harmonic coefficients with a limited maximum degree.
The temporal variations are represented by base functions $\Phi_k(t_i)$ (e.g. trend and annual oscillation)
given by parametrizationTemporal.
The temporal base functions are evaluated at the mid time $t_i$ of each interval $i$, multiplied
with the design matrix $\M A_i$ of the selected parameters, and the design matrix is extended
accordingly.
Figure: Schema of the extended design matrix.
With estimateShortTimeVariations short time variations of the gravity field can be co-estimated.
Their purpose is to mitigate temporal aliasing.
The short time parameters selected by parameterSelection
(e.g. daily constant or linear splines every 6 hour) are constrained by an
autoregressiveModelSequence. If only a static parameter
set is selected the corresponding part of the design matrix is copied and modeled as a constant value
per interval in inputfileArcList additionally so the corresponding temporal factor can be expressed as
\[
\Phi_i(t) =
\begin{cases}
1 &\text{if} \hspace{5pt} t \in [t_i, t_{i+1}) \\
0 & \text{otherwise}
\end{cases}.
\]
Before writing the normal equations to outputfileNormalEquation
short time gravity and satellite specific parameters can be eliminated with eliminateParameter.
Example: For the computation of the mean gravity field ITSG-Grace2018s with additional trend and annual signal
the normal equations are computed month by month and accumulated afterwards (see NormalsAccumulate).
The observations were divided into daily intervals with inputfileArcList.
The static gravity field has been parametrized as spherical harmonics
up to degree $n=200$ in observation:parametrizationGravity.
The trend and annual signals defined by
estimateLongTimeVariations:parametrizationTemporal
were estimated for selected parameters up to degree $n=120$.
To mitigate temporal aliasing daily gravity fields up to degree $n=40$ were setup and constrained
with an autoregressiveModelSequence up to order three.
A detailed description of the approach is given in:
Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties.
J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1.
The normalMatrix must be symmetric.
The rightHandSide must have the same number of rows
and can contain multiple columns for multiple solutions.
The Vector $\M l^T\M P\M l$ is the quadratic sum of observations for each column of the right hand side.
It is used to determine the aposteriori accuracy
\[
\hat{\sigma}^2 = \frac{\hat{\M e}^T\M P\hat{\M e}}{n-m} = \frac{\M l^T\M P\M l - \M n^T\hat{\M x}}{n-m}.
\]If the vector is not given, it is automatically determined by assuming $\hat{\sigma}^2=1$.
The number of observations $n$ is given by the expression observationCount.
The variable observationCount can be used, if it is set by a normal equation file
inputfileNormalEquationObsCount.
The remainingParameters allows the selection
of parameters that will remain, all others will be eliminated. The order of remaining parameters
can be modified via the parameter selection. Block size of the output normal matrix can be adjusted with
outBlockSize. If it is set to zero, the outputfileNormalEquation
is written to a single block file.
For example the normal equations are divided into two groups of
parameters $\hat{\M x}_1$ and $\hat{\M x}_2$ according to
\[
\begin{pmatrix}
\M N_{11} & \M N_{12} \\
\M N_{21} & \M N_{22}
\end{pmatrix}
\begin{pmatrix} \hat{\M x}_1 \\ \hat{\M x}_2 \end{pmatrix}
=
\begin{pmatrix}
\M n_1 \\
\M n_2
\end{pmatrix}.
\]and $\hat{\M x}_2$ shall be eliminated, the reduced system of normal equations is given by
\[
\bar{\M N}\hat{\M x} = \bar{\M n}
\qquad\text{with}\qquad
\bar{\M N}=\M N_{11}-\M N_{12}\M N_{22}^{-1}\M N_{12}^T
\qquad\text{and}\qquad\bar{\M n} = \M n_1 - \M N_{12}\M N_{22}^{-1}\M n_2.
\]
See also NormalsReorder.
This program modifies inputfileNormalEquation in a way
that $\bar{\M x}$ is estimated instead of $\M x$.
\[
\bar{\M x} := \M x + \alpha\, \M x_0,
\]where $\M x_0$ is inputfileParameter and $\alpha$ is factor.
This can be used to re-add reduced reference fields before a combined estimation
at normal equation level.
Therefore the right hand side of the normal equations is modified by
\[
\bar{\M n} := \M n + \alpha\,\M N\M x_0,
\]and the quadratic sum of observations by
\[
\bar{\M l^T\M P\M l} := \M l^T\M P\M l + \alpha^2\,\M x_0^T\M N\M x_0 + 2\alpha\,\M x_0^T\M n
\]
As the normal matrix itself is not modified, rewriting of the matrix can be disabled by setting
writeNormalMatrix to false.
Name
Type
Annotation
outputfileNormalEquation
filename
inputfileNormalEquation
filename
inputfileParameter
filename
x
factor
double
alpha
writeNormalMatrix
boolean
write full coefficient matrix, right hand sides and info files
This program sets up two regularization matrices for two different regional areas.
For a given set of points defined by grid it is evaluated, whether each point
(corresponding to an unknown parameter of a respective parameterization by space localizing basis functions)
is inside or outside a certain area given by border.
Each regularization matrix is a diagonal matrix, one of them features a one if the
point is inside, and a zero if the point lies outside the area. The other matrix features
a zero if the point is inside, and a one if the point lies outside the area.
This results in two regularization matrices with
\[
\M R_1+\M R_2=\M I.
\]The two matrices are provided as vectors of the diagonal
in the output files outputfileOutside and outputfileInside.
The regularization matrices are then used by normalEquation:regularization.
As an example, the two different areas could be oceanic regions on the one hand and continental areas on the other hand.
Diagonal regularization matrix from gravity field accuracies,
if not given from signal (cnm,snm), if not given from Kaula's rule.
The inverse accuracies $1/\sigma_n^2$ are used as weights in the regularization matrix.
The diagonal is saved as Vector.
Reorder inputfileNormalEquation by selecting parameters in a specific order.
The parameterSelection also allows one to change the dimension of the normal equations,
either by cutting parameters or by inserting zero rows/columns for additional parameters.
Without parameterSelection the order of parameters remains the same.
Additionally the block sizes of the files can be adjusted. If outBlockSize is set to zero,
the normal matrix is written to a single block file, which is needed by some programs.
To eliminate parameters without changing the result of the other parameters use NormalsEliminate.
The combined normal equation is extended to include all parameter names uniquely from all input normals.
The input normals are sorted so that parameters with the same name are accumulated.
This requires that the names in each normal equation are unique.
The output can be written as multiple small block files with outBlockSize,
or as single block with outBlockSize=0,
or blocked with respect to the first part of the parameter names (object), if outBlockSize left empty.
Scales rows and columns of a system of inputfileNormalEquation
given by a diagonal matrix inputfileFactorVector $\M S$
\[
\bar{\M N} := \M S \M N \M S \qquad\text{and}\qquad \bar{\M n} := \M S \M n.
\]The estimated solution is now
\[
\bar{\M x} := \M S^{-1} \M x.
\]This is effectively the same as rescaling columns of the design matrix.
This program is useful when combining normal equations from different sources,
for example in case the units of certain parameters don't match.
This program accumulates normalEquation
and solves the total combined system.
The relative weighting between the individual normals is determined iteratively
by means of variance component estimation (VCE). For a detailed description
of the used algorithm see normalEquation.
The outputfileContribution is a matrix with rows for each estimated
parameter and columns for each normalEquation
and indicates the contribution of the individual normals to the estimated parameters.
Each row sums up to one.
This program computes the linearized and decorrelated equation system for each arc $i$:
\[
\M l_i = \M A_i \M x + \M B_i \M y_i + \M e_i
\]using class observation and writes $\M A_i$, $\M B_i$ and $\M l_i$ as matrix files.
Name
Type
Annotation
outputfileObservationVector
filename
one file for each arc
outputfileDesignMatrix
filename
one file for each arc, without arc related parameters
This program computes the argument of latitude of an orbit
and writes it as instrument file (MISCVALUE(S)).
The data of inputfileInstrument are appended as values to each epoch.
This program computes the beta prime angle (between the orbital plane and earth-sun direction)
and writes it as MISCVALUE(S) instrument file. The angle is calculated w.r.t the sun (per default),
but can be changed.
The data of inputfileInstrument are appended as values to each epoch.
The coordinate system used in the CPF format is usually presented in ITRF.
The required time format for the input orbit file is GPS.
The time format of the output CPF file is given in UTC.
Normally the orbits in GROOPS are given in the celestial reference frame (CRF) with the
origin in the center of mass (CoM). This program rotates the orbit with
earthRotation from CRF to the TRF.
To additionally transform into the center of solid Earth (CE) frame (or center of Figure (CF)),
a correction can be applied by providing degree one coefficients of a
gravityfield (e.g. ocean tides).
If celestial2terrestrial is set to no, the inverse transformation is applied.
This program generates an instrument file (MISCVALUE(S)) containing the eclipse factor for a given orbit.
The data of inputfileInstrument are appended as values to each epoch.
This program converts an instrument file (ORBIT) specified in the celestial reference frame (CRF)
to the GRACE/GRACE-FO SDS format (GNV1B, GNI1B). If earthRotation is provided,
the orbit is rotated into the terrestrial reference frame as required for the GNV1B product; otherwise,
a GNI1B product is written.
The text file inputfileHeader is placed at the beginning of the outputfile.
The text parser is applied so that all variables can be used.
In addition, the times of the data are available with the variables {epochmin}, {epochmax},
and {epochcount}.
Convert groops orbits and corresponding covariance information to ASCII format.
The format is used to publish TUG orbits. It contains a two line header
with a short description of the orbit defined in firstLine.
The orbit is rotated to the Earth fixed frame (TRF) with earthRotation and given as one line per epoch.
The epoch lines contain time [MJD GPS time], position x, y and z [m], and the epoch covariance xx, yy, zz, xy, xz and yz [$m^2$].
This program writes satellite positions as gridded data
(outputfileTrackGriddedData) in a terrestrial reference frame. The points are expressed as ellipsoidal coordinates
(longitude, latitude, height) based on a reference ellipsoid with parameters R and
inverseFlattening. The orbit data are given in the celestial frame so earthRotation
is needed to transform the data into the terrestrial frame.
The data of inputfileInstrument are appended as values to each point.
This program computes the magnetic field vector ($x, y, z$ $[Tesla = kg/A/s^2]$ in CRF)
along an orbit and writes it as instrument file (MISCVALUES).
The data of inputfileInstrument are appended as data columns to each epoch.
Name
Type
Annotation
outputfileMagneticField
filename
instrument file (x,y,z in CRF [Tesla = kg/A/s^2]), ...)
SP3 orbits are usually given in the terrestrial reference frame (TRF), so providing earthRotation
automatically rotates the orbits from the celestial reference frame (CRF) to the TRF.
Since SP3 orbits often use the center of Earth as a reference, a correction from center of mass to center
of Earth can be applied to the orbits by providing gravityfield (e.g. ocean tides).
This program computes the thermospheric state (density, temperature, wind (x,y,z in CRF))
based on empirical models along an orbit
and writes it as instrument file (MISCVALUES).
The wind is given in a celestial reference frame (CRF).
The data of inputfileInstrument are appended as values to each epoch.
Name
Type
Annotation
outputfileThermosphericState
filename
instrument file (MISCVALUES: density, temperature, wind (x,y,z in CRF), ...)
This program computes velocities and accelerations from a given orbit
by differentiating a moving polynomial.
The values are saved in one output file which then contains orbit, velocity and acceleration.
Creates an orbit file of sun, moon, or planets.
The orbit is given in the celestial reference frame (CRF)
or alternatively in the terrestrial reference frame (TRF)
if earthRotation is provided.
Generates a two dimensional xy plot using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org).
A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
The plotting area is defined by the two axes axisX/Y. An alternative axisY2
on the right hand side can be added. The content of the graph itself is defined
by one or more layers.
The plot programs create a temporary directory in the path of outputfile, write all needed data into it,
generate a batch/shell script with the GMT commands, execute it, and remove the temporary directory.
With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually
to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value",
see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
Generates a map using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org).
A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
The base map is defined by a projection of an ellipsoid (R, inverseFlattening).
The content of the map itself is defined by one or more layers.
The plot programs create a temporary directory in the path of outputfile, write all needed data into it,
generate a batch/shell script with the GMT commands, execute it, and remove the temporary directory.
With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually
to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value",
see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
Plot the coefficients of a inputfileMatrix
using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org).
A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
The plot programs create a temporary directory in the path of outputfile, write all needed data into it,
generate a batch/shell script with the GMT commands, execute it, and remove the temporary directory.
With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually
to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value",
see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
Plot the potential coefficients of a spherical harmonic expansion
using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org).
A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
This program plots the formal errors (sigmas).
If gravityfield provides no sigmas
e.g. with setSigmasToZero in gravityfield:potentialCoefficients
the coefficients themselves are plotted instead.
The plot programs create a temporary directory in the path of outputfile, write all needed data into it,
generate a batch/shell script with the GMT commands, execute it, and remove the temporary directory.
With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually
to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value",
see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
This program is a simplified version of Gravityfield2TimeSplines.
It reads a series of potential coefficient files (inputfilePotentialCoefficients)
and creates a time splines file with spline degree 0 (temporal block means) or degree 1 (linear splines).
The time intervals in which the potential coefficients are valid are defined between adjacent
points in time given by splineTimeSeries. Therefore one more point in time is needed
than the number of potential coefficient files for degree 0.
The coefficients can be filtered with filter.
If set the expansion is limited in the range between minDegree and maxDegree inclusively.
The coefficients are related to the reference radius R and the Earth gravitational constant GM.
This program is useful e.g. to combine monthly GRACE solutions to one file.
The coefficients can be filtered with filter and converted
to different functionals with kernel. The gravity field can be evaluated at
different altitudes by specifying evaluationRadius. Polar regions can be excluded
by setting polarGap. If set the expansion is limited in the range between minDegree
and maxDegree inclusively. The coefficients are related to the reference radius R
and the Earth gravitational constant GM.
The outputfileMatrix contains in the first 3 columns the degree, the degree amplitude, and
the formal errors. For each additional inputfilePotentialCoefficients three columns
are appended: the degree amplitude, the formal errors, and the difference to the first file.
Create a DoodsonHarmonic file from a list of
cos/sin potentialCoefficients for given doodson
(Doodson number or Darwin's name, e.g. 255.555 or M2) tidal constituents.
If applyXi the Doodson-Warburg phase correction (see IERS conventions) is applied before.
Write spherical harmonics in ICGEM format.
GROOPS uses this format as default but this program enables
the possibility to include comments and set the modelname.
Covariance function from Power Spectral Density (PSD).
The inputfilePSD contains in the first column the frequency $[Hz]$, followed by (possibly multiple) PSDs $[unit^2/Hz]$.
The output is a matrix, the first column containing time lag $[s]$ and the other columns the covariance functions $[unit^2]$.
Conversion between PSD $p_j$ and covariance function $c_k$ is performed by discrete cosine transformation:
\[
c_k = \frac{1}{4\Delta t (n-1)}\left(p_0 + p_{n-1} (-1)^k + \sum_{j=1}^{n-2} 2 p_j \cos(\pi jk/(n-1))\right).
\]
See also CovarianceFunction2PowerSpectralDensity.
Name
Type
Annotation
outputfileCovarianceFunction
filename
first column: time steps [seconds], following columns: covariance functions
inputfilePSD
filename
first column: frequency [Hz], following columns PSD [unit^2/Hz]
This programs processes satellite-to-satellite-tracking (SST) and orbit observations in a GRACE like configuration.
Four different observation groups are considered separately: two types of SST and POD1/POD2 for the two satellites.
This program works similarly to PreprocessingSst, see there for details. Here only
the settings which differ are explained.
Both SST observation types are reduced by the same background models and the same impact
of accelerometer measurements. The covariance matrix of the reduced observations should not consider
the instrument noise only (covarianceSst1/2) but must
take the cross correlations covarianceAcc into account.
The covariance matrix of the reduced observations is given by
\[
\M\Sigma(\begin{bmatrix} \Delta l_{SST1} \\ \Delta l_{SST2} \end{bmatrix})
= \begin{bmatrix} \M\Sigma_{SST1} + \M\Sigma_{ACC} & \M\Sigma_{ACC} \\
\M\Sigma_{ACC} & \M\Sigma_{SST2} + \M\Sigma_{ACC}
\end{bmatrix}.
\]
Name
Type
Annotation
outputfileSolution
filename
estimated parameter vector (static part only)
outputfileSigmax
filename
standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation)
outputfileParameterName
filename
estimated signal parameters (index is appended)
estimateArcSigmas
sequence
outputfileSigmasPerArcSst1
filename
accuracies of each arc (SST1)
outputfileSigmasPerArcSst2
filename
accuracies of each arc (SST2)
outputfileSigmasPerArcAcc
filename
accuracies of each arc (ACC)
outputfileSigmasPerArcPod1
filename
accuracies of each arc (POD1)
outputfileSigmasPerArcPod2
filename
accuracies of each arc (POD2)
estimateEpochSigmas
sequence
outputfileSigmasPerEpochSst1
filename
accuracies of each epoch (SST1)
outputfileSigmasPerEpochSst2
filename
accuracies of each epoch (SST2)
outputfileSigmasPerEpochAcc
filename
accuracies of each epoch (ACC)
outputfileSigmasPerEpochPod1
filename
accuracies of each epoch (POD1)
outputfileSigmasPerEpochPod2
filename
accuracies of each epoch (POD2)
estimateCovarianceFunctions
sequence
outputfileCovarianceFunctionSst1
filename
covariance function
outputfileCovarianceFunctionSst2
filename
covariance function
outputfileCovarianceFunctionAcc
filename
covariance function
outputfileCovarianceFunctionPod1
filename
covariance functions for along, cross, radial direction
outputfileCovarianceFunctionPod2
filename
covariance functions for along, cross, radial direction
computeResiduals
sequence
outputfileSst1Residuals
filename
outputfileSst2Residuals
filename
outputfileAccResiduals
filename
outputfilePod1Residuals
filename
outputfilePod2Residuals
filename
observation
choice
observation equations (Sst)
dualSstVariational
sequence
two SST observations
rightHandSide
sequence
input for observation vectors
inputfileSatelliteTracking1
filename
ranging observations and corrections
inputfileSatelliteTracking2
filename
ranging observations and corrections
inputfileOrbit1
filename
kinematic positions of satellite A as observations
inputfileOrbit2
filename
kinematic positions of satellite B as observations
This program estimates empirical covariance functions of the gradiometer instrument noise and determines arc-wise variances to
downweight arcs with outliers. This program works similarly to PreprocessingPod, see there for details.
Here only the settings which differ are explained.
This program estimates empirical covariance functions of the instrument noise and determines arc-wise variances to
downweight arcs with outliers.
A complete least squares adjustment for gravity field determination is performed by computing the observation
equations, see observation:podIntegral or
observation:podVariational for details. The normal equations
are accumulated and solved to outputfileSolution together with the estimated accuracies
outputfileSigmax. The estimated residuals $\hat{\M e}=\M l-\M A\hat{\M x}$ can be computed with
computeResiduals.
For each component (along, cross, radial) of the kinematic orbit positions a noise covariance function is estimated
\[
\text{cov}(\Delta t_i) = \sum_{n=0}^{N-1} a_n^2 \cos\left(\frac{\pi}{T} n\Delta t_i\right).
\]The covariance matrix is composed of the sum of matrices $F_n$ and unknown variance factors
\[
\M\Sigma = a_1^2\M F_1 + a_2^2 \M F_2 + \cdots + a_N^2\M F_N,
\]with the cosine transformation matrices
\[
\M F_n = \left(\cos\left(\frac{\pi}{T} n(t_i-t_k)\right)\right)_{ik}.
\]
An additional variance factor can be computed (estimateArcSigmas) for each arc $k$ according to
\[
\hat{\sigma}_k^2 = \frac{\hat{\M e}_k^T\M\Sigma^{-1}\hat{\M e}_k}{r_k},
\]where $r_k$ is the redundancy. This variance factor should be around one for normally behaving arcs
as the noise characteristics are already considered by the covariance matrix but bad arcs get a much larger variance.
By applying this factor bad arcs or arcs with large outliers are downweighted.
Name
Type
Annotation
outputfileSolution
filename
estimated parameter vector (static part only)
outputfileSigmax
filename
standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation)
outputfileParameterName
filename
names of estimated parameters (static part only)
estimateArcSigmas
sequence
outputfileSigmasPerArcPod
filename
accuracies of each arc (POD2)
estimateCovarianceFunctions
sequence
outputfileCovarianceFunctionPod
filename
covariance functions for along, cross, radial direction
This program processes satellite-to-satellite-tracking (SST) and kinematic orbit observations in a GRACE like configuration.
Three different observation groups are considered separately: SST and POD1/POD2 for the two satellites.
This program works similar to PreprocessingPod, see there for details. Here only deviations
in the settings are explained.
Precise orbit data (POD) often contains systematic errors in addition to stochastic noise. In this case the
variance component estimation fails and assigns too much weight to the POD data. Therefore an additional
downweightPod factor can be applied to the standard deviation of POD for the next least squares adjustment
in the iteration. This factor should also be applied as sigma in observation
for computation of the final solution e.g. with NormalsSolverVCE.
Short time variations of the gravity field can be co-estimated together with the static/monthly
mean gravity field. The short time parameters must also be set in observation:parametrizationGravity and
can then be selected by estimateShortTimeVariations:parameterSelection.
If these parameters are not time variable, for example when a range of static parameters is selected,
they are set up as constant for each time interval defined in inputfileArcList. The parameters are constrained by an
estimateShortTimeVariations:autoregressiveModelSequence. The weight of
the constrain equations in terms of the standard deviation can be estimated by means of
Variance Component Estimation (VCE) if estimateShortTimeVariations:estimateSigma is set.
The mathematical background of this co-estimation can be found in:
This program integrates an orbit dynamically using the given forces and set up the state transition matrix
for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using
the variational equation approach. The variational equations are computed arc-wise as defined by inputfileOrbit.
This means for each arc new initial state parameters are set up.
In a first step the forces acting on the satellite are evaluated at the apriori positions given
by inputfileOrbit. Non-conservative forces like solar radiation pressure need the orientation of the
satellite (inputfileStarCamera) and additionally, a satellite macro model (satelliteModel)
with the surface properties. Furthermore inputfileAccelerometer observations are also considered.
In a second step the accelerations are integrated twice to a dynamic orbit using a moving polynomial with the degree
integrationDegree. The orbit is corrected to be self-consistent. This means the forces should be evaluated
at the new integrated positions instead of the apriori ones. This correction is computed in a linear approximation
using the gradient of the forces with respect to the positions (gradientfield). As this term is small generally
only the largest force components have to be considered. A low degree spherical harmonic expansion of the static gravity
field (about up to degree 5) is sufficient in almost all cases. In this step also the state transition matrix (the partial
derivatives of the current state, position and velocity) with respect to the initial state is computed.
The integrated orbit together with the state transitions are stored in outputfileVariational,
the integrated orbit only in outputfileOrbit.
To improve the numerical stability a reference ellipse can be reduced beforehand using Encke's method (useEnke).
Mathematically the result is the same, but as the large central term is removed before and restored
afterwards more digits are available for the computation.
The integrated orbit should be fitted to observations afterwards by the programs
PreprocessingVariationalEquationOrbitFit and/or PreprocessingVariationalEquationSstFit.
They apply a least squares adjustment by estimating some satellite parameters (e.g. an accelerometer bias).
If the fitted orbit is too far away from the original inputfileOrbit the linearization may not be
accurate enough. In this case PreprocessingVariationalEquation should be run again with the fitted orbit
as inputfileOrbit and introducing the estimatedParameters as additional forces.
Name
Type
Annotation
outputfileVariational
filename
approximate position and integrated state matrix
outputfileOrbit
filename
integrated orbit
inputfileSatelliteModel
filename
satellite macro model
inputfileOrbit
filename
approximate position, used to evaluate the force
inputfileStarCamera
filename
rotation from body frame to CRF
inputfileAccelerometer
filename
non-gravitational forces in satellite reference frame
The observed orbit positions (inputfileOrbit) together with the epoch-wise covariance matrix
(inputfileCovariancePodEpoch) must be split in the same arcs as the variational equations but not
necessarily uniformly distributed (use irregularData in InstrumentSynchronize). An iterative downweighting of
outliers is performed by M-Huber method.
The observation equations (parameter sensitivity matrix) are computed by integration of the variational equations
(inputfileVariational) using a polynomial with integrationDegree and interpolated to the
observation epochs using a polynomial with interpolationDegree.
All parameters used here must be reestimated in the full least squares adjustment
for the gravity field determination to get a solution which is not biased towards the reference field.
The solutions of additional estimations are relative (deltas) as the parameters are already used as Taylor point
in the reference orbit.
As the relative weighting of the observation types is important complex description of the covariances can be set with
covarianceSst, covariancePod1, covariancePod2.
Name
Type
Annotation
outputfileVariational1
filename
approximate position and integrated state matrix
outputfileVariational2
filename
approximate position and integrated state matrix
outputfileOrbit1
filename
integrated orbit
outputfileOrbit2
filename
integrated orbit
outputfileSolution1
filename
estimated calibration and state parameters
outputfileSolution2
filename
estimated calibration and state parameters
rightHandSide
sequence
input for observation vectors
inputfileSatelliteTracking
filename
ranging observations and corrections
inputfileOrbit1
filename
kinematic positions of satellite A as observations
inputfileOrbit2
filename
kinematic positions of satellite B as observations
This program calculates the coefficients $k_n$ of a kernel:coefficients according to
\[
k_n = \frac{GM}{4\pi R}\frac{\sigma_n}{\sqrt{2n+1}}.
\]from a given gravityfield,
with R and GM describing the reference radius and the geocentric constant, respectively.
The $\sigma_n$
stand for the gravity field accuracies (from degree minDegree to maxDegree), if they are given.
If no accuracies are provided, the $\sigma_n$
represent the square root of the degree variances of the gravity field.
If maxDegree exceeds the maximum degree given by gravityfield,
the higher degrees are complemented by Kaula's rule.
The output of the coefficients is given in the file outputfileCoefficients.
useType and ignoreType can be used to filter
the observation types that will be exported.
If inputfileStationInfo is set, RINEX antenna and receiver info
will be cross-checked with the provided file and warnings are raised in case of differences.
A list of semi-codeless GPS receivers (observing C2D instead of C2W) can be provided via
inputfileSemiCodelessReceivers with one receiver name per line.
Observation types will be automatically corrected for these receivers.
Some LEO satellites use special RINEX observation types, either from the unofficial RINEX v2.20
or custom ones. These can be provided via inputfileSpecialObservationTypes.
The file must contain a table with two columns, the first being the special type,
and the second being the equivalent RINEX v3 type.
Name
Type
Annotation
outputfileGnssReceiver
filename
inputfileRinexObservation
filename
RINEX or Compact RINEX observation files
inputfileMatrixPrn2FrequencyNumber
filename
(required for RINEX v2 files containing GLONASS observations), matrix with columns: GLONASS PRN, SVN, mjdStart, mjdEnd, frequencyNumber
inputfileStationInfo
filename
used to determine semi-codeless receivers and to cross-check antenna and receiver info
inputfileSemiCodelessReceivers
filename
list with one receiver name per line
inputfileSpecialObservationTypes
filename
table mapping special observation types to RINEX 3 types, e.g.: LA L1C
Execute system commands. If executeParallel is set and
multiple commands are given they are executed in parallel at
distributed nodes, otherwise they are executed consecutively at master node only.
Name
Type
Annotation
command
filename
silently
boolean
without showing the output.
continueAfterError
boolean
continue with next command after error, otherwise throw exception
This program creates a satellite macro model for the estimation of non-gravitational accelerations acting on a satellite.
Mandatory input values are the satelliteName, mass, coefficientDrag and information
about the satellite surfaces. For low Earth orbiting satellites, like GRACE for instance, a good guess
for the drag coefficient could be 2.3. Apart from that, it is later on possible to estimate a more precise variable drag coefficient
(e.g. miscAccelerations:atmosphericDrag), which will override this initial guess.
Concerning the satellite surfaces an external file must be imported which must contain information about each single
satellite plate in terms of plate area, the associated plate normal and re-radiation properties
(reflexion, diffusion and absorption) in the visible and IR part. For example, a description of
the macro model for GRACE can be found under:
https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/ProdSpecDoc_v4.6.pdf
Additionally, it is possible to add further information like antennaThrust, solar panel, temporal mass changes and
massInstrument using the modules option.
Name
Type
Annotation
outputfileSatelliteModel
filename
satellite
sequence
satelliteName
string
mass
double
coefficientDrag
double
surfaces
sequence
inputfile
filename
each line must contain one surface element
type
expression
0: plate, 1: sphere, 2: cylinder
area
expression
[m^2]
normalX
expression
normalY
expression
normalZ
expression
reflexionVisible
expression
diffusionVisible
expression
absorptionVisible
expression
reflexionInfrared
expression
diffusionInfrared
expression
absorptionInfrared
expression
specificHeatCapacity
expression
0: no thermal radiation, -1: direct reemission [Ws/K/m^2]
This program converts low-low satellite tracking data (KBR or LRI) from
the GROOPS format instrument file (SATELLITETRACKING)
to the GRACE SDS format (KBR1B or LRI1B).
It reads the satellite tracking data and optionally corrections
(antenna offsets and light time corrections) and flags into one outputfile.
The text file inputfileHeader is placed at the beginning of the outputfile.
The text parser is applied so that all variables can be used.
In addition, the times of the data are available with the variables {epochmin}, {epochmax},
and {epochcount}.
This program simulates accelerometer data. The orientation of the accelerometer
is given by inputfileStarCamera otherwise the celestial reference frame (CRF) is used.
For computation of non-conservative forces a satelliteModel is needed.
This program generates an accelerometer file containing perturbing accelerations
due to a given center of mass (CoM) offset. This includes centrifugal effects,
Euler forces and the effect of gravity gradients.
This program simulates error free gradiometer data along a satellite's orbit.
The orientation of the full tensor gradiometer is given by inputfileStarCamera
otherwise the celestial reference frame (CRF) is used.
The gravity gradients are given by gravityfield and
tides.
This program integrates an orbit from a given force function (dynamic orbit).
The force functions are given by forces.
For computation of non-conservative forces a satelliteModel is needed.
The integration method must be selected with propagator.
Because the orbit data are calculated in the celestial reference frame (CRF) you need
earthRotation to transform the force function
from the terrestrial reference frame (TRF).
The integration start and end time, as well as the sampling, are derived from
the timeSeries option. It is possible to integrate the arc in reverse,
where the initial conditions are assumed to be met at the end time of the timeSeries.
This program simulates tracking data (range, range-rate, range-accelerations)
between 2 satellites. The range is given by
\[
\rho(t) = \left\lVert{\M r_B(t) - \M r_A(t)}\right\rVert = \M e_{AB}(t)\cdot\M r_{AB}(t),
\]with $\M r_{AB} = \M r_B - \M r_A$ and the unit vector in line of sight (LOS) direction
\[\label{sst.los}
\M e_{AB} = \frac{\M r_{AB}}{\left\lVert{\M r_{AB}}\right\rVert}=\frac{\M r_{AB}}{\rho}.
\]Range-rates $\dot{\rho}$ and range accelerations $\ddot{\rho}$ are obtained by differentiation
\[\label{obsRangeRate}
\dot{\rho} = \M e_{AB}\cdot\dot{\M r}_{AB} + \dot{\M e}_{AB}\cdot\M r_{AB}
= \M e_{AB}\cdot\dot{\M r}_{AB},
\]\[\label{obsRangeAccl}
\begin{split}
\ddot{\rho} &= \M e_{AB}\cdot\ddot{\M r}_{AB} +\dot{\M e}_{AB}\cdot\dot{\M r}_{AB}
= \M e_{AB}\cdot\ddot{\M r}_{AB} +
\frac{1}{\rho}\left(\dot{\M r}_{AB}^2-\dot{\rho}^2\right). \\
\end{split}
\]with the derivative of the unit vector
\[
\dot{\M e}_{AB}=\frac{d}{dt}\left(\frac{\M r_{AB}}{\rho}\right)
=\frac{\dot{\M r}_{AB}}{\rho}-\frac{\dot{\rho}\cdot\M r_{AB}}{\rho^2}
=\frac{1}{\rho}\left({\dot{\M r}_{AB}-\dot{\rho}\cdot\M e_{AB}}\right).
\]The inputfileOrbits must contain positions, velocities, and acceleration
(see OrbitAddVelocityAndAcceleration).
This program simulates star camera measurements at each satellite's position.
The satellite's orientation follows a local orbit frame with the x-axis in along track (along velocity),
y-axis is cross track (normal to position and velocity vector) and z-axis pointing nadir (negative position vector).
As the position and velocity vectors are not exactly orthogonal for a non-circular orbit, the default is for the x-axis to be exactly
along velocity while the z-axis completes a right hand system (not exactly nadir); with nadirPointing the z-axis
is exactly nadir and the x-axis approximately along track.
The resulting rotation matrices rotate from satellite frame to inertial frame.
Name
Type
Annotation
outputfileStarCamera
filename
rotation from satellite to inertial frame (x: along, y: cross, z: nadir)
inputfileOrbit
filename
position and velocity defines the orientation of the satellite at each epoch
nadirPointing
boolean
false: exact along and nearly nadir, true: nearly along and exact nadir
This program simulates star camera measurements at each satellite position
of inputfileOrbit.
The resulting rotation matrices rotate from body frame to inertial frame. The body frame refers
to the IGS-specific (not the manufacturer-specific) body frame, as described by
Montenbruck et al. (2015).
The inputfileOrbit must contain velocities
(use OrbitAddVelocityAndAcceleration if needed).
Information about the attitude mode(s) used by the GNSS satellite may be provided via
inputfileAttitudeInfo. This file can be created with
GnssAttitudeInfoCreate. It contains one or more time-dependent entries,
each defining the default attitude mode, the attitude modes used around orbit noon and
midnight, and some parameters required by the various modes.
If no inputfileAttitudeInfo is selected, the program defaults
to a nominal yaw-steering attitude model.
A sufficiently high modelingResolution ensures that the attitude behavior is modeled properly
at all times.
The attitude behavior is defined by the respective mode. Here is a list of the supported
modes with a brief explanation and references:
nominalYawSteering:
Yaw to keep solar panels aligned to Sun (e.g. most GNSS satellites outside eclipse) [1]
orbitNormal:
Keep fixed yaw angle, for example point X-axis in flight direction (e.g. BDS-2G, BDS-3G, QZS-2G) [1]
catchUpYawSteering:
Yaw at maximum yaw rate to catch up to nominal yaw angle (e.g. GPS-* (noon), GPS-IIR (midnight)) [2, 3]
shadowMaxYawSteeringAndRecovery:
Yaw at maximum yaw rate from shadow start to end, recover after shadow (e.g. GPS-IIA (midnight)) [2]
shadowMaxYawSteeringAndStop:
Yaw at maximum yaw rate from shadow start until nominal yaw angle at shadow end is reached,
then stop (e.g. GLO-M (midnight)) [4]
shadowConstantYawSteering:
Yaw at constant yaw rate from shadow start to end (e.g. GPS-IIF (midnight)) [3]
centeredMaxYawSteering:
Yaw at maximum yaw rate centered around noon/midnight (e.g. QZS-2I, GLO-M (noon)) [4, 8]
smoothedYawSteering1:
Yaw based on an auxiliary Sun vector for a smooth yaw maneuver (e.g. GAL-1) [5]
smoothedYawSteering2:
Yaw based on a modified yaw-steering law for a smooth yaw maneuver (e.g. GAL-2, BDS-3M, BDS-3I) [5, 6]
betaDependentOrbitNormal:
Switch to orbit normal mode if below beta angle threshold (e.g. BDS-2M, BDS-2I, QZS-1) [7, 8]
Figure: Overview of attitude modes used by GNSS satellites
See GnssAttitudeInfoCreate for more details on which satellite uses which attitude modes
and the required parameters for each mode.
This program simulates star camera measurements at each satellite's position for the Sentinel 1A satellite.
The inputfileOrbit must contain positions and velocities (see OrbitAddVelocityAndAcceleration).
The resulting rotation matrices rotate from satellite frame to inertial frame.
Name
Type
Annotation
outputfileStarCamera
filename
inputfileOrbit
filename
position and velocity defines the orientation of the satellite at each epoch
This program simulates outputfileStarCamera measurements at each satellite's position for the Terrasar satellite.
The inputfileOrbit must contain positions and velocities (see OrbitAddVelocityAndAcceleration).
The resulting rotation matrices rotate from satellite frame to inertial frame.
H. Fiedler, E. Boerner, J. Mittermayer and G. Krieger,
Total zero Doppler Steering-a new method for minimizing the Doppler centroid,
in IEEE Geoscience and Remote Sensing Letters, vol. 2, no. 2, pp. 141-145, April 2005, https://www.doi.org/10.1109/LGRS.2005.844591.
Name
Type
Annotation
outputfileStarCamera
filename
rotation from satellite to inertial frame (x: along, y: cross, z: nadir)
inputfileOrbit
filename
position and velocity defines the orientation of the satellite at each epoch
Convert station discontinuities from
SINEX format
(e.g. ITRF20) to outputfileInstrument (MISCVALUE).
A value of 1 means position discontinuity, a value of 2 means velocity discontinuity.
Start and end epochs with value 0 are added in addition to the discontinuities from
SINEX to define continuity interval borders.
Extracts station positions from inputfileSinexSolution
(SINEX format description)
and writes an outputfileInstrument of type VECTOR3D
for each station. Positions will be computed at timeSeries based on position and velocity
of each provided interval in the SINEX file.
With inputfileSinexDiscontinuities the bounds of these time spans are adjusted to the exact epochs of discontinuities.
The inputfileSinexPostSeismicDeformations adds the ITRF post-seismic deformation model to the affected stations.
The inputfileSinexFrequencies adds annual and semi-annual frequencies.
If extrapolateBackward or extrapolateForward are provided, positions will also be computed for epochs
before the first interval/after the last interval, based on the position and velocity of the first/last interval.
Position extrapolation will stop at the first discontinuity before the first interval/after the last interval.
Stations can be limited via stationName, otherwise all stations in inputfileSinexSolution will be used.
Name
Type
Annotation
outputfileInstrument
filename
loop variable is replaced with station name (e.g. wtzz)
Reads metadata like station name, station number, approximate station position and station eccentricities
from Station Eccentricities Sinex File
(une version) and write them to the outputfileStationInfo for each station.
Reference:
Rodriguez J., Otsubo T., Appleby G. Upgraded Modelling for the
Determination of Centre of Mass Corrections of Geodetic SLR
Satellites: Impact on Key Parameters of the Terrestrial Reference
Frame. Journal of Geodesy, 2019. doi: 10.1007/s00190-019-01315-0
This program processes SLR normal point or full rate observations. It calculates the linearized observation equations,
accumulates them into a system of normal equations and solves it.
To calculate observation equations from the passes, the model parameters or unknown parameters need to be
defined beforehand. These unknown parameters can be chosen arbitrarily by the user with an adequate list of defined
parametrization.
Some of the parametrization also include a priori models.
Lastly it is required to define the process flow of the SLR processing. This is accomplished
with a list of processingSteps.
Each step is processed consecutively. Some steps allow the selection of parameters, station, or satellites,
which affects all subsequent steps.
The timeSeries is used to precompute Earth rotation and station displacements
with a uniform sampling. In a second step these values are interpolated to the observation epochs.
A sampling of about 10 minutes should be adequate.
It should be noted that GROOPS uses GPS time format, but normal point/full rate data files and CPF files, provided by ILRS data centers
are given in UTC time format.
With satelliteIdentifier a single satellite can be selected if the inputfiles
contain more than one satellite. If satelliteIdentifier is empty the first satellite is taken.
All satellites can be selected with satelliteIdentifier=<all>.
In this case the identifier is appended to each output file.
If earthRotation is provided the data are transformed
from terrestrial (TRF) to celestial reference frame (CRF).
Since SP3 orbits often use the center of Earth as a reference, a correction from center
of Earth to center of mass can be applied to the orbits by providing gravityfield (e.g. ocean tides).
This program converts orientation data measured by a star camera (SRF to CRF)
from the GROOPS format instrument file (STARCAMERA) to the GRACE SDS format (SCA1B).
It reads one inputfileStarCamera and optionally one
inputfileStarCameraFlags containing MISCVALUES(sca_id, qual_rss, qualflg),
and writes one SDS output file.
The text file inputfileHeader is placed at the beginning of the outputfile.
The text parser is applied so that all variables can be used.
In addition, the times of the data are available with the variables {epochmin}, {epochmax},
and {epochcount}.
If earthRotation is provided, the output file contains quaternions
for rotation from TRF to satellite body frame (IGS/ORBEX convention),
otherwise the rotation is from CRF to satellite body frame.
This program reads SWARM star camera data given in the CDF format
and previously converted to an ASCII file using the program cdfexport
provided by the Goddard Space Flight Center (http://cdf.gsfc.nasa.gov/).
This program builds a linear operator matrix for spherical harmonic analysis or synthesis based on
the points defined in grid. Depending on the chosen
type (synthesis, quadrature, or leastSquares), the resulting matrix can be used to:
synthesis: Map spherical harmonic coefficients to values on a grid,
quadrature: Integrate grid-based functionals into spherical harmonic coefficients by
a simple quadrature formula,
leastSquares: Estimate coefficients from grid data via a least squares approach.
The spherical harmonic degree range is constrained by
minDegree and maxDegree, and the ordering of the coefficients is given by
numbering. The reference gravitational
constant is GM, and the reference radius is R.
The computed matrix is written to outputfileMatrix with dimensions
(number of grid points) $\times$ (number of spherical harmonic coefficients). For
type = leastSquares, the program applies a QR-based pseudo-inverse so that the
output matrix can directly form the normal-equation building blocks for a blockwise
least-squares solution in spherical harmonic space.
This program computes the design matrix of temporal representation at a given time series.
The output matrix contains the time steps in MJD in the first column, the other columns contain the design matrix.
The intention of this program is to visualize the parametrization together with PlotGraph.
This program reads in TerraSar-X or Tandem-X orbits in the special CHORB format and takes the appropriate
time frame as stated in the document header.
A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-002.pdf
This program converts the output (neutral mass density, temperature) of an empirical thermosphere model (e.g. JB2008) on a given grid.
Additionally, also the thermospheric winds estimated by using the horizontal wind model HWM 2014 can be assessed.
The time for the evaluation can be specified in time. The values will be saved together with points expressed as ellipsoidal coordinates
(longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.
Figure: JB2008 model in 300 km height at 2003-07-01 12:00.
Name
Type
Annotation
outputfileGriddedData
filename
density [kg/m**3], temperature [K], wind (x, y, z) [m/s**2]
For each epoch a gridded data file is written where
the variableLoopTime and variableLoopIndex are expanded for
each point of the given time series to create the file name for this epoch
(see text parser).
The number of input data columns must be a multiple of the number $n$ of grid points.
If isGroupedDataByPoint is true the inputfileTimeSeries starts
with all data (data0, data1… ) for the first point, followed by all data of the second point and so on.
If isGroupedDataByPoint is false, the file starts with data0 for all points, followed by all data1 and so on.
Interpret the data columns of inputfileTimeSeries
as potential coefficients. The sequence of coefficients is given by
numbering starting from data column startDataFields.
For each epoch an outputfilesPotentialCoefficients
is written where the variableLoopTime and variableLoopIndex are expanded for
each point of the given time series to create the file name for this epoch,
see text parser.
This program computes the outputfileOrbit
from two-line elements (TLE/3LE)
as can be found at e.g. http://celestrak.org/NORAD/elements/.
The first satellite in the input file that matches the wildcard of satelliteName is used.
If more records with exactly the same name are found, the one with the closest reference epoch
is used for each point in the timeSeries.
The program uses the Simplified General Perturbation (SGP) model. More information can
be found in the Revisiting Spacetrack Report 3 by Vallado et al. 2006.
Represents a sequence of multivariate autoregressive (AR) models with increasing order $p$.
The AR models should be stored as matrix file in the GROOPS definition of
AR models.
The required AR models can be computed with CovarianceMatrix2AutoregressiveModel,
and passed to this class through
inputfileAutoregressiveModel in increasing order.
The corresponding normal equation coefficient matrix is given by
\[
\label{eq:ar-normals}
\bar{\mathbf{\Phi}}^T\bar{\mathbf{\Sigma}}^{-1}_{\bar{\mathbf{w}}}\bar{\mathbf{\Phi}}
\]and if all AR models are estimated from the same sample its inverse is a block-Toeplitz covariance matrix
\[
(\mathbf{\Sigma}_{\mathbf{y}_m})_{ij} =
\begin{cases}
\mathbf{\Sigma}(|j-i|) & \text{for } i \leq j \\
\mathbf{\Sigma}(|j-i|)^T & \text{otherwise}
\end{cases},
\]which can be computed using AutoregressiveModel2CovarianceMatrix.
A detailed description with applications can be found in:
Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties.
J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1
With this class you can select one or more region on the surface of the Earth.
In every instance of Border you can choose whether the specific region is excluded
from the overall result with the switch exclude.
To determine whether a specific point will be used, the following algorithm is applied:
In a first step, all points are selected if the first border excludes points; otherwise all points are excluded.
Then every point is tested against each instance of border from top to bottom.
If the point is not in the selected region nothing happens.
Otherwise it will be included or excluded depending on the switch exclude.
First Example: The border excludes all continental areas.
The result are points on the oceans only.
Second Example: First border describes the continent north america. The next borders
exclude the great lakes and the last border describes Washington island.
In this configuration points are selected if they are inside north america
but not in the area of the great lakes. But if the point is on Washington island
it will be included again.
Rectangle
The region is restricted along lines of geographical coordinates.
minPhi and maxPhi describe the lower and the upper bound of the region.
minLambda and maxLambda define the left and right bound.
Name
Type
Annotation
minLambda
angle
maxLambda
angle
minPhi
angle
maxPhi
angle
exclude
boolean
dismiss points inside
Cap
The region is defined by a spherical cap with the center given in geographical coordinates
longitude (lambdaCenter) and latitude (phiCenter).
The radius of the cap is given as aperture angle psi.
Figure: spherical cap
Name
Type
Annotation
lambdaCenter
angle
longitude of the center of the cap
phiCenter
angle
latitude of the center of the cap
psi
angle
aperture angle (radius)
exclude
boolean
dismiss points inside
Polygon
The region is defined by inputfilePolygon
containing one or more polygons given in longitude and latitude.
An additional buffer around the polygon can be defined.
Use a negative value to shrink the polygon area.
This reference manual details classes included in GROOPS, describing what they are and what they do.
For usage examples see the cookbook in the documentation overview.
Check whether a file or directory exists.
Supports wildcards * for any number of characters and ? for exactly one character.
Files smaller than minSize are treated as non-existent.
Name
Type
Annotation
file
filename
supports wildcards: * and ?
minimumSize
uint
minimum file size in byte.
Command
Execute command and check success.
Name
Type
Annotation
command
filename
silently
boolean
without showing the output.
Expression
Evaluate expression.
Name
Type
Annotation
expression
expression
Matrix
Evaluate elements of a matrix based on an expression.
If all=yes, all elements of the matrix must evaluate to true
for the condition to be fulfilled, otherwise any element evaluating to true is sufficient.
Determines if there is a match between a pattern and some subsequence in a string.
Supports wildcards * for any number of characters and ? for exactly one character.
If isRegularExpression is set, pattern is interpreted as a
regular expression instead. In any case, the text parser
is applied beforehand.
Name
Type
Annotation
string
filename
should contain a {variable}
pattern
filename
supports wildcards: * and ?
isRegularExpression
boolean
pattern is a regular expression
caseSensitive
boolean
treat lower and upper case as distinct
StringMatchPattern
Determines if a pattern matches the entire string.
Supports wildcards * for any number of characters and ? for exactly one character.
If isRegularExpression is set, pattern is interpreted as a
regular expression instead. In any case, the text parser
is applied beforehand.
Name
Type
Annotation
string
filename
should contain a {variable}
pattern
filename
supports wildcards: * and ?
isRegularExpression
boolean
pattern is a regular expression
caseSensitive
boolean
treat lower and upper case as distinct
And
All conditions must be met (with short-circuit evaluation).
================================================
FILE: docs/html/cookbook.gnssNetwork.html
================================================
GROOPS - GNSS satellite orbit determination and station network analysis
GNSS satellite orbit determination and station network analysis
This cookbook chapter describes an example of global GNSS processing as done by analysis centers of the
International GNSS Service (IGS). Resulting products usually comprise:
Satellite orbits, clocks, and signal biases
Station positions, clocks, signal biases, and troposphere estimates
Earth orientation parameters
Scientific details about the underlying processing approach and the applied parametrizations, models, and corrections
can be found in a doctoral thesis available under DOI 10.3217/978-3-85125-885-1.
Note: Global GNSS processing can become very computationally intensive. Depending on the number of satellites
and stations, the observation and processing sampling, and parametrizations it can quickly exceed the capabilities
of a normal desktop computer and may require computer clusters or number crunchers (see sectionParallelization).
Data preparation
Most of the required metadata files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops.
These files are regularly updated.
Data that has to be gathered from other sources comprises:
The example scenario includes a small set of this data.
The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats.
Prepare a station list file that contains the stations to be processed.
Each line can contain more than one station. The first station in each line that has data available is used for the processing.
If your network contains more than 60-70 stations, it is recommended to start processing with a core network (see Advanced).
In this case, define an additional core station list file that can also have multiple stations per line.
Preprocessing: Orbit integration
Numerical integration of the satellite orbits is the first step in global GNSS processing.
Dynamic orbits are integrated based on force models and then fitted to the approximate orbits
by estimating their initial state and additional empirical parameters for solar radiation pressure to improve the orbit fit.
The resulting variational equations file contains the integrated orbit, derivatives
with respect to the satellite state vector, attitude, Earth rotation and satellite model.
Orbit preprocessing is covered by the script 020groopsGnssPreprocessing.xml in the example scenario.
It is recommended to perform the steps below in a loop over all
satellites/PRNs using LoopPrograms. To get the relation between {prn} and {svn} setup
an additional loop:platformEquipment inside
loop:loop with
This second loop should perform only one step. The following programs are looped over all {prn}:
InstrumentResample: resample approximate orbits from data preparation to target sampling (e.g., 1 minute) by defining a timeSeries based on a method:polynomial (polynomialDegree=7, maxDataPointRange=7200, maxExtrapolationDistance=900).
OrbitAddVelocityAndAcceleration: add velocity via running polynomial (polynomialDegree=2) derivation (needed for attitude computation)
The script 030groopsGnssProcessing.xml in the example scenario
implements the following steps and settings.
These are the settings for GnssProcessing. If not otherwise stated use the default values.
The first step is setting the processing sampling, in this example it is 30 seconds.
The processing interval usually is a single 24-hour day, therefore define
timeSeries:uniformSampling with timeStart=<mjd>,
timeEnd=<mjd>+1, sampling=30/86400 (processing sampling).
Add the appropriate transmitters:gnss (e.g. GPS, GLONASS, and Galileo)
and provide the required files:
Finally, define the processingSteps.
This can be overwhelming at first, but offers a lot of flexibility.
The example script uses a 5-minute processing sampling with subsequent clock densification to 30 seconds.
selectEpochs: with nthEpoch=10 to reduce sampling to 5 minutes.
selectParametrizations:
disable constraint.STEC, *VTEC, *.tecBiases as the ionosphere parameters are estimated in the final steps only.
With some additional steps, the full 30-second sampling can be used to estimate all parameters (not only the clocks).
These steps are disabled in the example script, as they require at least 16 GB of system memory.
In this case, it is not necessary to separately write the 30-second clock files as listed above.
selectEpochs: with nthEpoch=1 to set full 30-second sampling
selectNormalsBlockStructure: As the system of normal
equations can be very large, the memory consumption might be reduced with keepEpochNormalsinMemory=no.
In this case the epoch parameters are directly eliminated during the accumulation and reconstructed in the solving step.
This might lead to longer computation times.
estimate: with maxIterationCount=2: final iterations with full sampling
Advanced: Processing large station networks
Processing large station networks requires some additional steps to keep the computational load to a reasonable degree.
The general processing strategy is to first process a well-distributed subset of stations (i.e. a core network)
to get good estimates of all satellite parameters, which then enables integer ambiguity resolution (IAR). Once
the ambiguities of the core network are resolved and stable estimates for satellite phase biases are available, all
other (non-core) stations can be processed individually (including IAR) while keeping the satellite parameters fixed.
At last, all stations can be processed together with all satellite parameters and ionosphere parameters.
This cookbook chapter describes an example of GNSS precise point positioning (PPP) for a ground station using GPS, GLONASS, and Galileo.
For information on how to generate the GNSS products (orbits, clocks, signal biases, etc.) required for PPP,
see the cookbook GNSS satellite orbit determination and station network analysis.
Scientific details about the underlying processing approach and the applied parametrizations, models, and corrections
can be found in a doctoral thesis available under DOI 10.3217/978-3-85125-885-1.
Receiver observations, precise satellite orbits and clocks, and possibly attitude and signal biases can be downloaded from the
IGS Data Centers.
GPS, GLONASS, and Galileo orbits, clocks, attitude, and signal biases for the period 1994-2020 are also available as part of
Graz University of Technology's contribution to IGS repro3.
The example scenario includes a small set of this data.
The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats.
Prepare a station list file that contains the stations (one per line) to be processed.
Processing of a ground station
The script 02groopsGnssProcessing.xml in the example scenario
implements the following steps and settings.
These are the settings for GnssProcessing. If not otherwise stated use the default values.
The first step is setting the processing sampling, in this example it is 30 seconds.
The processing interval usually is a single 24-hour day,
timeSeries:uniformSampling with timeStart=<mjd>,
timeEnd=<mjd>+1, sampling=30/86400 (processing sampling).
useType: We recommend to explicitly specify the signals to be processed
and to make sure that at least transmitter code biases are provided for each of them, e.g. C1CG,
C1WG, C2WG, L1*G, L2*G, ...).
excludeType: Signals you might want to exclude are L5*G (GPS L5 phase due to time-variable bias on block IIF satellites), *3*R (GLONASS G3 freq.), *6*E (Galileo E6 freq.)
Add the following parametrizations
and define the outputfiles you are interested in inside each of them:
When processing multiple stations at the same time, moving estimate
and resolveAmbiguities into the processing step
forEachReceiverSeparately sets up and solves
the normal equations independently for each station.
================================================
FILE: docs/html/cookbook.gravityFieldGrace.html
================================================
GROOPS - GRACE gravity field recovery
GRACE gravity field recovery
This cookbook chapter describes an example of estimating a gravity field solution using GRACE observation
data. For the respective month a set of spherical harmonic coefficients up to a maximum degree is determined.
An example scenario for this task can be found at
https://ftp.tugraz.at/pub/ITSG/groops/scenario/scenarioGraceGravityfieldRecovery.zip
including the required GROOPS scripts and data sets for the gravity field recovery process.
The provided scenario consists of a slightly simplified example as compared to the operational one.
The background models are provided at https://ftp.tugraz.at/pub/ITSG/groops/data/.
Background models
The following background models were used during the data processing:
These models were reduced during the analysis process and are not present in the solution.
The GOCO06s
model was used as the static gravity field as well as for the trend component and annual oscillation.
In the script 000groopsBackgroundModels.xml a monthly mean of the GOCO06s including the time-variable components is determined
in form of time splines using Gravityfield2TimeSplines. This model is later added back to the final gravity solution.
Instrument data preparation
The ITSG gravity field solutions are computed from the official GRACE L1B JPL (2018)
and GRACE-FO L1B JPL (2019) observation data. The data sets for this example are provided
in GROOPS file format in the scenario folder.
The satellite-to-satellite-tracking (SST) data consists of:
K-band range rates
Light time correction
Antenna offset corrections
Additional observation data required for the processing comprises:
Data preparation is handled in the script 010groopsInstruments.xml. The approximate orbits (initial dynamic orbits)
of the satellites, the star camera observations, the accelerometer data and the thruster data are resampled with a 5s sampling
and small gaps in the data are filled using InstrumentResample. Gross outliers are removed using InstrumentRemoveEpochsByCriteria
and the data is synchronized using InstrumentSynchronize.
The approximate orbits are later used as a priori information for the dynamic orbit integration.
In addition to the observed orientation of the spacecraft (star camera observations),
the nominal orientation is computed using SimulateStarCameraGrace. The difference between
observed and simulated orientation is determined using InstrumentStarCameraMultiply
and is employed in the outlier detection.
The accelerometer data is initially calibrated by estimating a bias using
InstrumentAccelerometerEstimateBiasScale with respect to simulated data created
with SimulateAccelerometer. For simulating accelerometer data a satellite model
implying the satellite's mass and surfaces is required. Such a model can be created with
SatelliteModelCreate. Models for the GRACE and GRACE-FO satellites are also provided
at https://ftp.tugraz.at/pub/ITSG/groops/data/satelliteModel/. Non-gravitational forces
comprising atmospheric drag, solar radiation pressure and albedo have to be modeled when simulating
the accelerometer data. The acceleration bias parameters are determined as degree 3 time splines
with 6h nodes. When determining these parameters the thruster events are excluded from the estimation.
The SST observations, the light time corrections and the antenna center corrections are synchronized
with a 5s sampling together with simulated SST data created with SimulateSatelliteTracking.
Simulated data is used for the outlier detection of the original SST observations.
The sampling of the kinematic orbits is reduced to 60s using InstrumentReduceSampling and
an outlier detection is performed using the approximate dynamic orbits.
The approximate orbits, the star camera observations and the accelerometer data are divided into 24h arcs
(variational arcs). The kinematic orbits, its 3x3 epoch covariances, KBR observations,
light time corrections, antenna center corrections and star camera observations are divided into 3h arcs
per day (short arcs). Additionally the approximate orbits and the star camera observations are also
synchronized to short arcs.
In this processing step dynamic orbits are computed for a complete 24h orbit arc by integrating
the forces acting on the GRACE/GRACE-FO satellites. Additionally, the state transition matrix is set up.
The dynamic orbits are then fitted to kinematic orbits and SST observations in a least squares adjustment
by co-estimating additional accelerometer calibration parameters together with the initial state vector.
The newly estimated parameters are then used to re-estimate the dynamic orbits and setting up the new
state transition matrix.
The script 020groopsVariational.xml in the scenario folder implements the required processing steps.
Time splines from a time-variable gravity field are estimated using Gravityfield2TimeSplines.
In this step the static gravity field (GOCO06s) is combined with the following time-variable components:
The observation equations (parameter sensitivity matrix) are computed by integration of the variational
equations (inputfileVariational) using a polynomial with
integrationDegree=7. PreprocessingVariationalEquationOrbitFit has to be
executed per satellite.
The dynamic orbit and the resulting accelerometer calibration parameters are now used to re-integrate
the orbit once more using PreprocessingVariationalEquation and introducing
parametrizationAcceleration as estimatedParameters.
This step usually ensures convergence. If the maximum orbit difference is still not sufficient this step
can be repeated again.
Preprocessing
The script 030groopsPreprocessing.xml implements the following steps and settings.
The program PreprocessingSst processes SST observations and kinematic orbit data, and
performs a complete least squares adjustment for gravity field determination by computing
the observations equations. It also allows for an iterative refinement of the stochastic model
of the observations along with arc-wise variance factors through variance component estimation (VCE).
Force model parameters (gravitational potential coefficients and accelerometer calibration parameters)
are computed by integrating the parameter sensitivity matrix from the variational equations.
Parameters describing effects due to the SST observation system and geometry (KBR antenna phase
center variations) are computed using the dynamic orbits as a Taylor point.
Short time gravity variations can be co-estimated together with the monthly mean gravity field.
The autoregressive model sequence constraining the short time parameters is provided in the data folder.
It is precomputed from hydrology and nontidal atmospheric and ocean background models.
See Kvas 2019 for more information about
this co-estimation.
Normal equations are set up in the script 040groopsMonthlyNormals120.xml using
the program NormalsBuildShortTimeStaticLongTime. The time intervals which the normal
equations are divided into are defined in inputfileArcList.
The normal equations are based on observations including the SST data,
the kinematic orbits and the variational equations. The parametrization of the gravity field can
be set with observation:parametrizationGravity
(e.g. spherical harmonics up to degree and order 120). Accelerometer calibration parameters
and KBR antenna phase center variations can be parameterized using
parametrizationAcceleration and
parametrizationSst.
With estimateShortTimeVariations short time variations of the gravity
field can be co-estimated. The parameters selected by
parameterSelection (e.g. linear splines with 6h nodes) are
constrained by an autoregressiveModelSequence.
Additional temporal variations (e.g. trend and annual oscillation) could be estimated with
estimateLongTimeVariations.
Solving normal equations
The desired spherical harmonic coefficients are determined in the script 050groopsMonthlySolve.xml.
NormalsSolverVCE accumulates normalEquation and solves
the total combined system. Variance component estimation is used to determine the relative weighting
of the individual normals, i.e. the arc-wise variances. The previously computed stochastic model of
the observations remains unchanged. The estimated parameter vector (outputfileSolution),
the estimated accuracies (outputfileSigmax) and the full covariance matrix
(outputfileCovariance) can be saved.
Using Gravityfield2PotentialCoefficients the final solution can be saved as
a spherical harmonics file by adding back the monthly mean gravity
field to the estimated spherical harmonic coefficients.
================================================
FILE: docs/html/cookbook.gravityFieldPod.html
================================================
GROOPS - Gravity field determination from POD data
Gravity field determination from precise orbit data (POD)
This cookbook chapter describes exemplarily the steps for determining the monthly gravity variations from precise orbit data (POD).
Step 1: Preparation of data
The following data have to be prepared monthly with an adequate sampling, e.g. 10 s using
InstrumentConcatenate:
Precise (kinematic) orbit data
3x3 covariance matrices data
Initial orbit data used for precise orbit determination
Star camera data
Accelerometer data
Reduced sampling can be achieved by InstrumentReduceSampling. If the satellite mission does not provide any required
accelerometer data, these data can be generated via SimulateAccelerometer.
For satellite missions with less knowledge about the acting forces, it makes sense to consider more than one state vector within an orbit revolution.
Otherwise the accuracy of the estimated parameters will decrease. This implies that shorter arcs are necessary. The assignment of the kinematic orbit
data as well as the 3x3 covariance matrices data to the arcs can be done with InstrumentSynchronize.
Step 2: Conversion of the background gravity field
For determining the accuracies and weights of the kinematic orbits it is sufficient to make a least-squares estimation with only certain parameters, due
to the fact that some parameters do not influence the estimation of the accuracies and weights.
This estimation is done with PreprocessingPod. Additionally, this program determines the temporal correlation of the kinematic orbit positions
x, y and z. If short arcs are used the setting observation:podIntegral shall be used. This setting
considers the frictional forces by means of a macro model as well as the conservative and non-conservative forces.
Step 4: Solving of normal equations system
NormalsSolverVCE sets up the observation equations and accumulates them into a normal equation system. The subsequent least-squares estimation delivers
the parameter increments.
Step 5: Determination of the estimated gravity field parameters
The estimated parameters result from the re-addition of the background field, which is done in MatrixCalculate.
Step 6: Conversion of the gravity field parameters
================================================
FILE: docs/html/cookbook.instrument.html
================================================
GROOPS - Instrument data handling
Instrument data handling
GROOPS provides functions and programs to read/write, preprocess, analyze and visualize uniformly and non-uniformly sampled instrument data.
This includes tools for filter design and analysis, re-sampling, smoothing, detrending, and power spectrum estimation.
This tutorial goes through exemplary steps for data handling procedures.
Reading data
GROOPS is able to read and convert relevant data from various LEO and GNSS satellites. Instrument files need to be converted into the respective GROOPS format using conversion programs. Depending on the content of the input file, the data is stored with a specific instrument type. The user also has the option to change the type later on with InstrumentSetType.
Multiple files can be concatenated to one file using InstrumentConcatenate. Using this program, it is also possible to sort the epochs, remove the duplicates and NaN values.
Example: Concatenating instrument files
Create three successively daily sinusoidal signals with TimeSeriesCreate and set their type to MISCVALUE with InstrumentSetType. In this example, each data set has an overlap of 1 hour with their following dataset.
Merge all datasets to one single file with InstrumentConcatenate.
Figure: Example 1: Concatenating instrument files into one dataset.
Many measurements involve data collected asynchronously by multiple sensors with different sampling. Use InstrumentSynchronize for a continuous harmonization of the data over time or segmentation of the data into arcs.
Preprocessing
Real-world data is often incomplete, inconsistent, and/or lacking in certain behaviors or trends, and is likely to contain many errors. Data preprocessing is a proven method of resolving such issues. The following steps usually need to be taken:
Gross outlier removal:
Create reference values to compare the input data with. Depending on the instrument type, this can be done by simulation programs such as SimulateAccelerometer or SimulateStarCamera. If no reference data is available, the outlier detection is based on the data itself. If needed, synchronize the reference data file and the input data with InstrumentSynchronize.
In case of star camera data, compute the differences between the input data and the reference data with InstrumentStarCameraMultiply.
Set a threshold for outlier detection in InstrumentRemoveEpochsByCriteria. The threshold is defined empirically according to the accuracy characteristics of each data product. If the differences exceed a predefined threshold, the corresponding epochs are removed. An arbitrary margin can be defined to additionally remove epochs before and after the identified outliers. It is also possible to remove epochs at specific times using InstrumentRemoveEpochsByTimes.
Apply a lowpass filter (e.g. Butterworth) with the Nyquist frequency of the target sampling as cutoff with InstrumentFilter. Apply the filter in both directions to avoid phase shifts.
For a general instrument file, InstrumentDetrend subtracts offsets or linear/nonlinear trends from the input data. This can be achieved also with FunctionsCalculate or InstrumentArcCalculate by applying determined calibration factors or solving a least-square adjustment.
For accelerometer data, InstrumentAccelerometerEstimateBiasScale is designed to estimate and subtract complex biases or scales with respect to simulated accelerometer data. If a thruster file is given, the corresponding epochs are eliminated during estimation process.
Example: GRACE-C accelerometer calibration
For one particular date, read and convert Level-1B GRACE-C orbit, star camera, accelerometer, and thruster data with GraceL1b2Orbit, GraceL1b2StarCamera, GraceL1b2Accelerometer, and GraceL1b2Thruster respectively. It is also required to read the macro-model data of the satellite using the related information in the official document and convert it to GROOPS format with SatelliteModelCreate.
Calibrate the real measurements with a daily constant accelerometer bias by choosing a constant parameter per axis in parametrizationAcceleration:accBias.
Figure: Example 3: Calibrating GRACE-C ACT1B data.
Spectral analysis studies the frequency spectrum contained in discrete, uniformly sampled data. The Fourier transform is a tool that reveals frequency components of a signal by representing it in frequency space. The Power Spectral Density (PSD) is a measurement of the energy at each frequency.
If the sampling is irregular use InstrumentResample to make the sampling equidistant.
Create a sinusoidal signal with an amplitude of 1.0 using TimeSeriesCreate and set its type to MISCVALUE with InstrumentSetType. Interpret this data as a simulation data file.
Add zero-mean, white Gaussian noise with a standard deviation of 0.1 with NoiseInstrument. Interpret this data as a real measurement file.
Compute PSD of the simulated and measurement data and represent the results with PlotGraph.
Figure: Example 4: Spectral analysis of a synthetic signal.
Data visualization
Argument of latitude plot
Plotting instrument data as a function of satellite position in orbit and time reveals features related to the orbit geometry or environmental conditions. For circular orbits, the position of the satellite can be specified by the argument of latitude.
Synchronize the instrument data file with the related orbit data using InstrumentSynchronize.
Plot the instrument data versus argument of latitude and time with PlotGraph.
Example: Argument of latitude representation of GRACE-C eclipse factors
Compute eclipse factors at each epoch of GRACE-C orbit at an arbitrary time using Orbit2EclipseFactor.
Synchronize the eclipse factor data file with the related orbit data using InstrumentSynchronize.
Use Orbit2ArgumentOfLatitude to compute argument of latitude at each epoch and visualize the results with PlotGraph.
Figure: Example 5: GRACE-C eclipse factors represented in argument of latitude plot.
Ground-track plot
Plotting instrument data with respect to the satellite ground track is useful to identify any features of geophysical origin in the data.
Synchronize the instrument data file with the related orbit data using InstrumentSynchronize.
Use Orbit2Groundtracks to map instrument data to satellite ground-track.
Example 6: Ground-track representation of GRACE-C eclipse factors
Compute eclipse factors at each epoch of GRACE-C orbit at an arbitrary time using Orbit2EclipseFactor.
Synchronize the eclipse factor data file with the related orbit data using InstrumentSynchronize.
Use Orbit2Groundtracks to generate the gridded data. Each grid value represents the mean value of the eclipse factor over the instrument time period (1 month). Visualize the results with PlotMap.
Figure: Example 6: GRACE-C eclipse factors represented in ground-track plot.
================================================
FILE: docs/html/cookbook.kinematicOrbit.html
================================================
GROOPS - Kinematic orbit determination of LEO satellites
Kinematic orbit determination of LEO satellites
This cookbook chapter describes exemplarily the steps for determining kinematic orbits of low-Earth orbit (LEO) satellites.
Receiver observations, precise satellite orbits and clocks, and possibly attitude and signal biases can be downloaded from the
IGS Data Centers.
GPS, GLONASS, and Galileo orbits, clocks, attitude, and signal biases for the period 1994-2020 are also available as part of
Graz University of Technology's contribution to IGS repro3.
The example scenario includes a small set of this data.
The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats.
If you want to process another mission, you can create the necessary files with these steps:
For creating the GnssSatelliteInfo file use PlatformCreate. Note that the rotation from the
satellite reference frame into the antenna reference frame, as well as the change of the center of mass due to fuel consumption, has to be considered here.
Set the patterns for code (type=C**) and phase
(type=L**). The standard deviation is expressed e.g. with values=0.001/cos(2*PI/180*zenith).
Prepare LEO data
The example scenario includes a small set of this data for the GRACE-FO mission.
The script 020groopsConvertGracefo.xml can be used to convert these external formats into GROOPS formats.
The data preparation steps are:
Conversion of the approximate orbit and star camera data into GROOPS format using a conversion program.
For interpolating the orbit and star camera data to GNSS receiver epochs use InstrumentResample and provide the converted RINEX observation file as input for
timeSeries:instrument.
useType: We recommend to explicitly specify the signals to be processed
and to make sure that at least transmitter code biases are provided for each of them, e.g. C1CG,
C1WG, C2WG, L1*G, L2*G, ...).
Add the following parametrizations
and define the outputfiles you are interested in:
This shows the exemplary computation of a regional geoid using terrestrial gravimetric observations
in combination with a global satellite model such as GOCO06s. The geoid is estimated in a least squares adjustment with
a parametrization using radial basis functions. A detailed description of the method is given in
Christian Pock (2017), Consistent Combination of Satellite and Terrestrial Gravity Field Observations in Regional Geoid Modeling.
Dissertation TU Graz.
Gravimetric data
Here it is assumed that the measured absolute gravity data is given at points in ellipsoidal coordinates.
The observed values should be converted to SI units $m/s^2$.
A high resolution topography model is needed to reduce the observations.
As the model heights are usually given as physical heights, a reference geoid is needed to compute the correct ellipsoidal height.
GriddedDataCalculate: Generate a new combined griddedData file
with the orthometric height (data0) and the geoid height (data1).
GriddedTopography2PotentialCoefficients: Compute the gravitational potential in terms
of spherical harmonics up to a maximum degree of the global satellite model.
This is the part of the topography, which is already included in the global satellite model.
The integration boundaries are radialUpperBound=data0+data1
and radialLowerBound=data1.
Figure: Topography and geoid heights
Reduce
Calculate approximate reference gravity to reduce it from the observations.
Figure: Gravity disturbances: observed minus computed
Radial Basis Functions (RBF)
The residual gravity is parametrized in terms of Radial Basis Functions
parametrizationGravity:radialBasis.
The basis functions should be distributed on a regular grid covering
a somewhat larger area than the observations, see border.
The shape of the functions kernel:coefficients should reflect
the signal content of reduced observations and are defined by the coefficients.
maxDegree=7000. Complemented by Kaula's rule of thumb
The maximum degree should correspond to the spatial resolution.
Rule of thumb: the number of spherical harmonic coefficients $(\text{maxDegree}+1)^2$ should roughly agree
with the number of grid points if they would cover the complete Earth.
Figure: Degree amplitudes for the shape of the radial basis functions
Compute: Estimate parameters in a least squares adjustment
Setup the observation equations and accumulate the system of normal equations.
Provides arc-wise covariance matrices for precise orbit data.
Temporal correlations are modeled in the orbit system (along, cross, radial).
The inputfileCovarianceFunction provides temporal covariance functions for each axis.
From the diagonal matrix for each time step
\[
Cov_{3\times3}(t) = \text{diag}(cov_x(t), cov_y(t), cov_z(t))
\]the Toeplitz covariance matrix for an arc is constructed
\[
\M C = \begin{pmatrix}
Cov(t_0) & Cov(t_1) & \cdots & & & \\
Cov(t_1) & Cov(t_0) & Cov(t_1) & \cdots & & \\
\cdots & Cov(t_1) & Cov(t_0) & Cov(t_1) & \cdots & \\
& \cdots & \ddots & \ddots & \ddots & \cdots \\
\end{pmatrix}
\]
The epoch-wise $3\times3$ covariance matrices given by inputfileCovariancePodEpoch
are eigenvalue-decomposed
\[
\M C_{3\times3}(t_i) = \M Q \M\Lambda \M Q^T,
\]where $\M Q$ is an orthogonal matrix and $\M\Lambda$ diagonal.
This is used to split the covariances matrices
\[
\M C_{3\times3}(t_i) = \M D(t_i) \M D(t_i)^T = (\M Q \M\Lambda^{1/2} \M Q^T)(\M Q \M\Lambda^{1/2} \M Q^T)^T,
\]and to compose a block diagonal matrix for an arc
\[
\M D = \text{diag}(\M D(t_1), \M D(t_2), \ldots, \M D(t_n)).
\]
The complete covariance matrix of an arc is given by
\[
\M C_{arc} = \sigma_0^2 \sigma_{arc}^2 \M D \M C \M D^T +
\text{diag}(\sigma_1^2\M I_{3\times3}, \sigma_2^2\M I_{3\times3}, \ldots, \sigma_n^2\M I_{3\times3})
\]where $\sigma_0$ is an overall factor
and the arc specific factors $\sigma_{arc}$ can be provided with inputfileSigmasPerArc.
The last matrix can be used to downweight outliers in single epochs and will be added if
inputfileSigmasPerEpoch is provided.
Name
Type
Annotation
covariancePodType
sequence
sigma
double
general variance factor
inputfileSigmasPerArc
filename
different accuracies for each arc (multiplied with sigma)
inputfileSigmasPerEpoch
filename
different accuracies for each epoch (added)
inputfileCovarianceFunction
filename
covariances in time for along, cross, and radial direction
Provides arc-wise covariance matrices for satellite-to-satellite observations (SST).
The inputfileCovarianceFunction provides a temporal covariance function.
From it the Toeplitz covariance matrix is constructed
\[
\M C = \begin{pmatrix}
cov(t_0) & cov(t_1) & \cdots & & & \\
cov(t_1) & cov(t_0) & cov(t_1) & \cdots & & \\
\cdots & cov(t_1) & cov(t_0) & cov(t_1) & \cdots & \\
& \cdots & \ddots & \ddots & \ddots & \cdots \\
\end{pmatrix}
\]
The complete covariance matrix of an arc is given by
\[
\M C_{arc} = \sigma_0^2 \sigma_{arc}^2 \M C + \sigma_{S,arc}^2 \M S_{arc}+ \text{diag}(\sigma_1^2, \sigma_2^2, \ldots, \sigma_n^2)
\]where $\sigma_0$ is an overall factor and the arc specific factors $\sigma_{arc}$
can be provided with inputfileSigmasPerArc.
The second term describes general covariance matrices for each arc
inputfileCovarianceMatrixArc together with the factors $\sigma_{S,arc}$ from sigmasCovarianceMatrixArc.
The last matrix can be used to downweight outliers in single epochs and will be added if
inputfileSigmasPerEpoch is provided.
Name
Type
Annotation
covarianceSstType
sequence
sigma
double
general variance factor
inputfileSigmasPerArc
filename
different accuracies for each arc (multiplied with sigma)
inputfileSigmasPerEpoch
filename
different accuracies for each epoch (added)
inputfileCovarianceFunction
filename
covariance function in time
inputfileCovarianceMatrixArc
filename
one matrix file per arc. Use {arcNo} as template
sigmasCovarianceMatrixArc
filename
vector with one sigma for each covarianceMatrixArc
Digital filter implementation for the filtering of equally spaced time series. This class implements the filter equations as
\[\label{digitalFilterType:arma}
\sum_{l=0}^Q a_l y_{n-l} = \sum_{k=-p_0}^{P-p_0-1} b_k x_{n-k}, \hspace{25pt} a_0 = 1,
\]where $Q$ is the autoregressive (AR) order and $P$ is the moving average (MA) order. Note that the MA part can also be non-causal.
The characteristics of a filter cascade can be computed by the programs DigitalFilter2FrequencyResponse and DigitalFilter2ImpulseResponse.
To apply a filter cascade to a time series (or an instrument file ) use InstrumentFilter.
Each filter can be applied in forward and backward direction by setting backwardDirection.
If the same filter is applied in both directions, the combined filter has zero phase and the squared magnitude response.
Setting inFrequencyDomain to true applies the transfer function of the filter to the DFT of the input and synthesizes the result, i.e.:
\[
y_n = \mathcal{F}^{-1}\{H\cdot\mathcal{F}\{x_n\}\}.
\]This is equivalent to setting padType to periodic.
To reduce warmup effects, the input time series can be padded by choosing a padType:
none: no padding is applied
zero: zeros are appended at the beginning and end of the input time series
constant: the beginning of the input time series is padded with the first value, the end is padded with the last value
periodic: periodic continuation of the input time series (i.e. the beginning is padded with the last epochs and the end is padded with the first epochs)
symmetric: beginning and end are reflected around the first and last epoch respectively
MovingAverage
Moving average (boxcar) filter. For odd lengths, this filter is symmetric and has therefore no phase shift. For even lengths, a phase shift of half a cycle is introduced.
Moving median filter of length $n$. The filter output at epoch $k$ is the median of the epochs from $k-n/2$ to $k+n/2$.
The filter length $n$ should be odd to avoid a phase shift.
Name
Type
Annotation
length
uint
length of the moving window [epochs]
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Derivative
Symmetric MA filter for numerical differentiation using polynomial approximation. The input time series is approximated by a moving polynomial of degree polynomialDegree, by solving
\[
\begin{bmatrix} x(t_k+\tau_0) \\ \vdots \\ x(t_k+\tau_M) \end{bmatrix}
=
\begin{bmatrix}
1 & \tau_0 & \tau_0^2 & \cdots & \tau_0^M \\
\vdots & \vdots & \vdots & & \vdots \\
1 & \tau_M & \tau_M^2 & \cdots & \tau_M^M \\
\end{bmatrix}%^{-1}
\begin{bmatrix}
a_0 \\ \vdots \\ a_M
\end{bmatrix}
\qquad\text{with}\quad
\tau_j = (j-M/2)\cdot \Delta t,
\]for each time step $t_k$ ($\Delta t$ is the sampling of the time series).
The filter coefficients for the $k$-th derivative are obtained by taking the appropriate row of the inverse coefficient matrix $\mathbf{W}$:
\[
b_n = \prod_{i=0}^{k-1} (k-i) \mathbf{w}_{2,:}.
\]The polynomialDegree should be even if no phase shift should be introduced.
Name
Type
Annotation
polynomialDegree
uint
degree of approximation polynomial
derivative
uint
take kth derivative
sampling
double
assumed time step between points
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Integral
Numerical integration using polynomial approximation.
The input time series is approximated by a moving polynomial of degree polynomialDegree
by solving
\[
\begin{bmatrix} x(t_k+\tau_0) \\ \vdots \\ x(t_k+\tau_M) \end{bmatrix}
=
\begin{bmatrix}
1 & \tau_0 & \tau_0^2 & \cdots & \tau_0^M \\
\vdots & \vdots & \vdots & & \vdots \\
1 & \tau_M & \tau_M^2 & \cdots & \tau_M^M \\
\end{bmatrix}%^{-1}
\begin{bmatrix}
a_0 \\ \vdots \\ a_M
\end{bmatrix}
\qquad\text{with}\quad
\tau_j = (j-M/2)\cdot \Delta t,
\]for each time step $t_k$ ($\Delta t$ is the sampling of the time series).
The numerical integral for each time step $t_k$ is approximated by the center interval of the estimated polynomial.
Figure: Numerical integration by polynomial approximation.
polynomialDegree should be even to avoid a phase shift.
Name
Type
Annotation
polynomialDegree
uint
degree of approximation polynomial
sampling
double
assumed time step between points
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Correlation
Correlation ($\rho$) of corr is introduced into the time series:
\[
y_n = \rho\cdot y_{n-1} + \sqrt{1-\rho^2}x_n.
\]
Name
Type
Annotation
correlation
double
correlation
backwardDirection
boolean
apply filter in backward direction
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
GraceLowpass
Low pass and differentiation filter as used for GRACE KBR and ACC data in the Level1A processing.
Figure: Amplitude response of the low pass filter used in the L1A processing.
Name
Type
Annotation
rawDataRate
double
sampling frequency in Hz (fs).
convolutionNumber
uint
number of self convolutions of the filter kernel
fitInterval
double
length of the filter kernel [seconds]
lowPassBandwith
double
target low pass bandwidth
normFrequency
double
norm filter at this frequency [Hz] (default: GRACE dominant (J2) signal frequency)
reduceQuadraticFit
boolean
remove->filter->restore quadratic fit
derivative
choice
derivative1st
range rate
derivative2nd
range acceleration
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Butterworth
Digital implementation of the Butterworth filter. The design of the filter is done by modifying the analog (continuous time) transfer function, which is
then transformed into the digital domain by using the bilinear transform. The filter coefficients are then determined by a least squares adjustment in time domain.
The filterType can be lowpass or highpass, where one cutoff frequency has to be specified, or bandpass or bandstop, where two cutoff frequencies have to be specified.
Cutoff frequencies must be given as normalized frequency $w_n = f/f_{\text{nyq}}$. For example, a cutoff frequency of 30 mHz for a time series sampled every 5 seconds gives a normalized frequency of $0.03/0.1 = 0.3$.
Name
Type
Annotation
order
uint
filter order
type
choice
filter type
lowpass
sequence
Wn
double
normalized cutoff frequency (f_c / f_nyq)
highpass
sequence
Wn
double
normalized cutoff frequency (f_c / f_nyq)
bandpass
sequence
Wn1
double
lower normalized cutoff frequency (f_c / f_nyq)
Wn2
double
upper normalized cutoff frequency (f_c / f_nyq)
bandstop
sequence
Wn1
double
lower normalized cutoff frequency (f_c / f_nyq)
Wn2
double
upper normalized cutoff frequency (f_c / f_nyq)
backwardDirection
boolean
apply filter in backward direction
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
File
Read filter coefficients of \eqref{digitalFilterType:arma} from a coefficient file.
One column might define the index $n$
of the coefficients $a_n$ and $b_n$ in the other columns.
Name
Type
Annotation
inputfileMatrix
filename
matrix with filter coefficients
index
expression
index of coefficients (input columns are named data0, data1, ...)
bn
expression
MA coefficients (moving average) (input columns are named data0, data1, ...)
an
expression
AR coefficients (autoregressive) (input columns are named data0, data1, ...)
backwardDirection
boolean
apply filter in backward direction
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Wavelet
Filter representation of a wavelet.
Name
Type
Annotation
inputfileWavelet
filename
wavelet coefficients
type
choice
filter type
lowpass
highpass
level
uint
compute filter for specific decomposition level
backwardDirection
boolean
apply filter in backward direction
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Notch
Implemented after Christian Siemes' dissertation, page 106.
Figure: Amplitude response of a notch filter of order three with default settings.
Name
Type
Annotation
notchFrequency
double
normalized notch frequency w_n = (f_n/f_nyq)
bandWidth
double
bandwidth at -3db. Quality factor of filter Q = w_n/bw
backwardDirection
boolean
apply filter in backward direction
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
Decorrelation
Moving average decorrelation filter based on eigendecomposition of a Toeplitz covariance matrix.
Name
Type
Annotation
inputfileCovarianceFunction
filename
covariance function of time series
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
TimeLag
Lag operator in digital filter representation.
Name
Type
Annotation
lag
int
lag epochs: 1 (lag); -1 (lead)
inFrequencyDomain
boolean
apply filter in frequency domain
padType
choice
none
no padding is applied
zero
zero padding
constant
pad using first and last value
periodic
periodic continuation of matrix
symmetric
symmetric continuation around the matrix edges
ReduceFilterOutput
Removes the filtered signal from the input, i.e. the input is passed
through a digitalFilter with a frequency response of $1-H(f)$.
================================================
FILE: docs/html/documentationSearchIndex.js
================================================
var documents = {
'general.configFiles': { 'name': 'general.configFiles', 'key': 'general.configFiles', 'description': 'GROOPS is controlled by XML configuration files. One or more configuration files must be passed as arguments to GROOPS: groops config1.xml config2.xml [...] These files can be created with the graphical user interface program groopsGui in a convenient way (see section GUI ). A complete formal (computer readable) description of a configuration file in the form of an XSD schema file can be created with the command groops --xsd groops.xsd A configuration file consists of a list of programs that are executed in sequential order. Each program comes with its own config options and they work independently without any internal communication between programs. Data flow between programs is realized via files. An outputfile of one program can serve as an inputfile for the next program. Most programs are deliberately kept small and focused on a specific task. This modularity combined with the general purpose design of many programs enables the creation of complex workflows with little effort. Including loops and conditions in a config file provides even more flexibility. Individual programs (and also other optional config elements) can be disabled and are ignored during execution. Mandatory config elements are indicated by a star ( * ). Empty optional elements are ignored or a meaningful default value is assumed. 
The elements of a configuration file can be one of the following basic data types: int : integer number uint : unsigned integer number double : floating point number angle : given in degree time : given in modified Julian date (MJD) boolean : 0: false, 1: true string : text filename : absolute path to a file or path relative to the working directory expression : numerical expression evaluated during execution doodson : Doodson number or Darwin\'s name of a tidal frequency gnssType : GNSS observation type according to the RINEX 3 definition The first 5 data types also allow numerical expressions as input in addition to pure numbers. In addition to these basic types, there are a large number of complex data types called classes, which are described in section Classes . In addition to programs, a config file can also include elements called variables. These elements are comparable to read-only variables in programming and can be referenced from any program and config element. This can be done by either linking an element directly to a variable or by using the name as a variable in an expression of an input field (see section Parsers and variables ). While elements can only be directly linked to variables of the same type, this also supports complex data types such as section . Thus it is possible to, for example, define a reference gravity field once in the global section and use it multiple times in different programs. Variables can be declared anywhere in the configuration file. Variables in locations other than the global section have a local scope and hide global variables or variables from a hierarchy level above. They are valid after declaration until the end of the hierarchy level is reached or a new variable with the same name is declared. Variables are not evaluated directly when they are declared, but only later when they are used in a config element. 
This means, for example, that a variable satelliteFile with data/swarm_orbit_{loopTime:%D}.dat can be declared in the global section without the variable loopTime having to be known at this time. One special variable is groopsDataDir , which is used as a variable in most default file paths throughout many GROOPS programs. Since this variable is going to be needed in most config files, it is recommended to define it in a template file that is used when creating new config files in the GUI. See section Graphical User Interface (GUI) for details on how to set up a template file. In addition, the variables groopsConfigFile and workingDir are set automatically. Global variables can be manipulated when running a config file by passing the argument --global <name>=<value> . For example, running the command groops --global timeStart=58849 --global satellite=swarm config.xml runs the config file config.xml but replaces the values of the global variable timeStart and satellite with 58849 and swarm , respectively. If a global variable passed as an argument does not already exist in the config file, it will be added with the type string . Only the basic data types listed above are supported. This feature can be useful when running GROOPS from the command line or from an external script file.', 'config_table': '', 'display_text': 'GROOPS is controlled by XML configuration files. One or more configuration files must be passed as arguments to GROOPS:
groops config1.xml config2.xml [...]
These files can be created with the graphical user interface program groopsGui in a convenient way (see section GUI). A complete formal (computer readable) description of a configuration file in the form of an XSD schema file can be created with the command
groops --xsd groops.xsd
A configuration file consists of a list of programs that are executed in sequential order. Each program comes with its own config options and they work independently without any internal communication between programs. Data flow between programs is realized via files. An outputfile of one program can serve as an inputfile for the next program. Most programs are deliberately kept small and focused on a specific task. This modularity combined with the general purpose design of many programs enables the creation of complex workflows with little effort. Including loops and conditions in a config file provides even more flexibility.
Individual programs (and also other optional config elements) can be disabled and are ignored during execution. Mandatory config elements are indicated by a star (*). Empty optional elements are ignored or a meaningful default value is assumed.
The elements of a configuration file can be one of the following basic data types:
int: integer number
uint: unsigned integer number
double: floating point number
angle: given in degree
time: given in modified Julian date (MJD)
boolean: 0: false, 1: true
string: text
filename: absolute path to a file or path relative to the working directory
expression: numerical expression evaluated during execution
doodson: Doodson number or Darwin\'s name of a tidal frequency
gnssType: GNSS observation type according to the RINEX 3 definition
The first 5 data types also allow numerical expressions as input in addition to pure numbers. In addition to these basic types, there are a large number of complex data types called classes, which are described in section Classes.
Variables
In addition to programs, a config file can also include elements called variables. These elements are comparable to read-only variables in programming and can be referenced from any program and config element. This can be done by either linking an element directly to a variable or by using the name as a variable in an expression of an input field (see section Parsers and variables). While elements can only be directly linked to variables of the same type, this also supports complex data types such as gravityfield. Thus it is possible to, for example, define a reference gravity field once in the global section and use it multiple times in different programs.
Variables can be declared anywhere in the configuration file. Variables in locations other than the global section have a local scope and hide global variables or variables from a hierarchy level above. They are valid after declaration until the end of the hierarchy level is reached or a new variable with the same name is declared.
Variables are not evaluated directly when they are declared, but only later when they are used in a config element. This means, for example, that a variable satelliteFile with data/swarm_orbit_{loopTime:%D}.dat can be declared in the global section without the variable loopTime having to be known at this time.
One special variable is groopsDataDir, which is used as a variable in most default file paths throughout many GROOPS programs. Since this variable is going to be needed in most config files, it is recommended to define it in a template file that is used when creating new config files in the GUI. See section Graphical User Interface (GUI) for details on how to set up a template file.
In addition, the variables groopsConfigFile and workingDir are set automatically.
Global variables can be manipulated when running a config file by passing the argument --global <name>=<value>. For example, running the command
groops --global timeStart=58849 --global satellite=swarm config.xml
runs the config file config.xml but replaces the values of the global variable timeStart and satellite with 58849 and swarm, respectively. If a global variable passed as an argument does not already exist in the config file, it will be added with the type string. Only the basic data types listed above are supported. This feature can be useful when running GROOPS from the command line or from an external script file.'},
'general.parser': { 'name': 'general.parser', 'key': 'general.parser', 'description': 'In all input fields that accept numbers (int, uint, double, angle, time) numerical expressions are also allowed. Declared variables can be accessed via their name. The following operations and functions are defined: Constants: pi() , rho()=180/pi() , nan() , c() : light velocity, G() : gravitational constant, GM() : gravitational constant of the Earth, R() : reference radius of the Earth Mathematical: + , - , * , / , ^ Comparison: == , != , < , <= , > , >= , result is 1 or 0 Logical: not ! , and && , || , or isnan(x) , result is 1 or 0 Functions: sqrt(x) , exp(x) , sin(x) , cos(x) , tan(x) , asin(x) , acos(x) , atan(x) , abs(x) , round(x) , ceil(x) , floor(x) , deg2rad(x) , rad2deg(x) Functions with 2 arguments: atan2(y,x) , min(x,y) , max(x,y) , mod(x,y) Time functions: now() : local time in MJD, date2mjd(year, month, day) , gps2utc(mjd) , utc2gps(mjd) , dayofyear(mjd) , decimalyear(mjd) Condition: if(c,x,y) : If the first argument is true (not 0), the second argument is evaluated, otherwise the third. Before the mathematical expression parser evaluates the expression, a simple text parser is applied. The text parser is used for all input fields (also file names). It scans the text for terms like {variable} and replaces it by the text content of the variable . A literal \' { \' character must be escaped with \' #{ \'. The text parser allows regex replacements in the form {text/regex/replace} . All matches of regex in the text are replaced by replace . Possible {variables} in the three parts are evaluated beforehand. Capturing groups () can be accessed by $1 , $2 , in the replacement ( $0 is the complete match). Additional escape sequences are: \\l lowercase next char, \\u uppercase next char, \\L lowercase until \\E , \\U uppercase until \\E , \\Q quote (disable) pattern metacharacters until \\E , \\E end either case modification or quoted section. 
Examples: {{variable}/test/text} replaces all occurrences of test by text . {TEXT/.+/\\L$0} converts text to lower case. {012345/.#{2}(.#{3}).*/$1} extracts the substring at index 2 and length 3 resulting in 234 . Note the escaping #{ . The text parser also evaluates terms in the form {expression:format} and replaces it by a formatted output. In order not to get confused with the regex replacements, the \' / \' character must be escaped with \' #/ \' in the expression. The format contains the text to be written as output. It can contain embedded format specifiers that are replaced by the value of the expression and formatted as requested (also multiple times). In the following, the resulting formatted output is given in the brackets for an expression with the example value of 57493.8: %i : Integer [57494] %f : Decimal floating point [57493.800000] %e : Scientific notation [5.749380e+04] %g : Use the shortest representation: %e or %f [57493.8] %c : Interpret number as ASCII character %% : Write a single literal % character The following specifiers interpret the value of the expression as MJD (modified Julian date): %y : Four digit year [2016] %Y : Two digit year [16] %m : Month [04] %d : Day of month [15] %H : Hour [19] %M : Minute [12] %S : Second [00] %D : Date (same as %y-%m-%d ) [2016-04-15] %T : Time (same as %H-%M-%S ) [19-12-00] %W : GPS week [1892] %w : Day of GPS week (0..6) [5] %O : Day of year (1..366) The format can be specified further with %[width][.precision]specifier , where [width] is the minimum number of characters to be printed. If the value to be printed is shorter than this number, the result is padded with blank spaces (or zeros if [width] starts with a zero). The [.precision] defines the number of digits after the period (for %g the number of significant digits instead). Example: Two variables time = 57493+19/24+12/1440 and satellite = swarm are set in the global section. 
The inputfile = data/{time:%y}/{satellite}_{time:%D}.dat is expanded to "data/2016/swarm_2016-04-15.dat" . Example: The variable x = 3+5 is set in the global section. The expression number = 2*x is evaluated by the expression parser to =16 . In contrast if we use brackets like in number = 2*{x} the expression is first evaluated by the text parser to "2*3+5" and the expression parser now gives the result =11 . Some programs (e.g. FunctionsCalculate , InstrumentArcCalculate , GriddedDataCalculate , or the plot programs) read data ( matrix ) or gridded data and evaluate input/output expressions for each data row. For these kind of expressions additional variables are automatically defined for each data column ( X stands for the data column number: ): index : the row number, starting with zero dataX : the value itself dataXcount : number of rows dataXmin dataXmax dataXsum dataXmean dataXrms : root mean square dataXstd : standard deviation dataXmedian dataXmad : median absolute deviation dataXstep : the minimal difference between two neighboring data points in the column For gridded data input the following variables are additionally defined for each data point: longitude in degrees latitude in degrees height in meters cartesianX coordinate in meters cartesianY coordinate in meters cartesianZ coordinate in meters area of the unit sphere dataXwmean : area-weighted mean dataXwrms : area-weighted root mean square dataXwstd : area-weighted standard deviation The XML configuration file is evaluated by two parsers. In a first step a text parser is applied. In the second step mathematical expressions are resolved to a number. Variables (see section variables ) can be referenced via their name directly for the expression parser or in the form {name} for the text parser.', 'config_table': '', 'display_text': 'The XML configuration file is evaluated by two parsers. In a first step a text parser is applied. In the second step mathematical expressions are resolved to a number. 
Variables (see section variables) can be referenced via their name directly for the expression parser or in the form {name} for the text parser.
Mathematical expression parser
In all input fields that accept numbers (int, uint, double, angle, time) numerical expressions are also allowed. Declared variables can be accessed via their name. The following operations and functions are defined:
Constants: pi(), rho()=180/pi(), nan(), c(): light velocity, G(): gravitational constant, GM(): gravitational constant of the Earth, R(): reference radius of the Earth
Mathematical: +, -, *, /, ^
Comparison: ==, !=, <, <=, >, >=, result is 1 or 0
Logical: not !, and &&, or ||, isnan(x), result is 1 or 0
Functions: sqrt(x), exp(x), sin(x), cos(x), tan(x), asin(x), acos(x), atan(x), abs(x), round(x), ceil(x), floor(x), deg2rad(x), rad2deg(x)
Functions with 2 arguments: atan2(y,x), min(x,y), max(x,y), mod(x,y)
Time functions: now(): local time in MJD, date2mjd(year, month, day), gps2utc(mjd), utc2gps(mjd), dayofyear(mjd), decimalyear(mjd)
Condition: if(c,x,y): If the first argument is true (not 0), the second argument is evaluated, otherwise the third.
Text parser
Before the mathematical expression parser evaluates the expression, a simple text parser is applied. The text parser is used for all input fields (also file names). It scans the text for terms like {variable} and replaces it by the text content of the variable. A literal \'{\' character must be escaped with \'#{\'.
The text parser allows regex replacements in the form {text/regex/replace}. All matches of regex in the text are replaced by replace. Possible {variables} in the three parts are evaluated beforehand. Capturing groups () can be accessed by $1, $2, in the replacement ($0 is the complete match). Additional escape sequences are:
\\l lowercase next char,
\\u uppercase next char,
\\L lowercase until \\E,
\\U uppercase until \\E,
\\Q quote (disable) pattern metacharacters until \\E,
\\E end either case modification or quoted section.
Examples:
{{variable}/test/text} replaces all occurrences of test by text.
{TEXT/.+/\\L$0} converts text to lower case.
{012345/.#{2}(.#{3}).*/$1} extracts the substring at index 2 and length 3 resulting in 234. Note the escaping #{.
The text parser also evaluates terms in the form {expression:format} and replaces it by a formatted output. In order not to get confused with the regex replacements, the \'/\' character must be escaped with \'#/\' in the expression. The format contains the text to be written as output. It can contain embedded format specifiers that are replaced by the value of the expression and formatted as requested (also multiple times). In the following, the resulting formatted output is given in the brackets for an expression with the example value of 57493.8:
%i: Integer [57494]
%f: Decimal floating point [57493.800000]
%e: Scientific notation [5.749380e+04]
%g: Use the shortest representation: %e or %f [57493.8]
%c: Interpret number as ASCII character
%%: Write a single literal % character
The following specifiers interpret the value of the expression as MJD (modified Julian date):
%y: Four digit year [2016]
%Y: Two digit year [16]
%m: Month [04]
%d: Day of month [15]
%H: Hour [19]
%M: Minute [12]
%S: Second [00]
%D: Date (same as %y-%m-%d) [2016-04-15]
%T: Time (same as %H-%M-%S) [19-12-00]
%W: GPS week [1892]
%w: Day of GPS week (0..6) [5]
%O: Day of year (1..366)
The format can be specified further with %[width][.precision]specifier, where [width] is the minimum number of characters to be printed. If the value to be printed is shorter than this number, the result is padded with blank spaces (or zeros if [width] starts with a zero). The [.precision] defines the number of digits after the period (for %g the number of significant digits instead).
Example: Two variables time=57493+19/24+12/1440 and satellite=swarm are set in the global section. The inputfile=data/{time:%y}/{satellite}_{time:%D}.dat is expanded to "data/2016/swarm_2016-04-15.dat".
Example: The variable x=3+5 is set in the global section. The expression number=2*x is evaluated by the expression parser to =16. In contrast if we use brackets like in number=2*{x} the expression is first evaluated by the text parser to "2*3+5" and the expression parser now gives the result =11.
Variables for data
Some programs (e.g. FunctionsCalculate, InstrumentArcCalculate, GriddedDataCalculate, or the plot programs) read data (matrix) or gridded data and evaluate input/output expressions for each data row. For these kind of expressions additional variables are automatically defined for each data column (X stands for the data column number: $0\\ldots n$):
index: the row number, starting with zero
dataX: the value itself
dataXcount: number of rows
dataXmin
dataXmax
dataXsum
dataXmean
dataXrms: root mean square
dataXstd: standard deviation
dataXmedian
dataXmad: median absolute deviation
dataXstep: the minimal difference between two neighboring data points in the column
For gridded data input the following variables are additionally defined for each data point:
longitude in degrees
latitude in degrees
height in meters
cartesianX coordinate in meters
cartesianY coordinate in meters
cartesianZ coordinate in meters
area of the unit sphere
dataXwmean: area-weighted mean
dataXwrms: area-weighted root mean square
dataXwstd: area-weighted standard deviation
'},
'general.loopsAndConditions': { 'name': 'general.loopsAndConditions', 'key': 'general.loopsAndConditions', 'description': 'The program flow within a config file can be controlled by the classes section and section . The easiest way to access these classes is with the programs LoopPrograms and IfPrograms . The programs defined in IfPrograms are only executed if the defined section is met. A typical example is to check whether a file that should have been created in previous programs actually exists. Further options are string comparisons and checking the result of a numerical expression or the return value of an external command. With LoopPrograms it is possible to repeat the programs defined inside within a loop. The class section creates a sequence to loop over and defines variables that contain the index and element for the current iteration. The section and section can also be attributed to single config elements (including programs). Config elements with an assigned loop are repeated, with the loop variables being evaluated for each element. If a section is attributed to a config element in addition to a loop, each element within the loop is only created if the condition is met. Conditions can also be attributed to optional elements without an associated loop. If the condition is not met, the optional element will be treated as if it was not provided. Example: A program needs all files in a download directory as input. All the inputfile s can be selected manually of course, but it is much easier to assign a loop variable with inputfile = {loopFile} and attribute a section . The loop lists the content of the download directory and assigns each file name to the variableLoopFile = loopFile .', 'config_table': '', 'display_text': 'The program flow within a config file can be controlled by the classes loop and condition. The easiest way to access these classes is with the programs LoopPrograms and IfPrograms.
The programs defined in IfPrograms are only executed if the defined condition is met. A typical example is to check whether a file that should have been created in previous programs actually exists. Further options are string comparisons and checking the result of a numerical expression or the return value of an external command.
With LoopPrograms it is possible to repeat the programs defined inside within a loop. The class loop creates a sequence to loop over and defines variables that contain the index and element for the current iteration.
The loop and condition can also be attributed to single config elements (including programs). Config elements with an assigned loop are repeated, with the loop variables being evaluated for each element. If a condition is attributed to a config element in addition to a loop, each element within the loop is only created if the condition is met. Conditions can also be attributed to optional elements without an associated loop. If the condition is not met, the optional element will be treated as if it was not provided.
Example: A program needs all files in a download directory as input. All the inputfiles can be selected manually of course, but it is much easier to assign a loop variable with inputfile={loopFile} and attribute a loop:directoryListing. The loop lists the content of the download directory and assigns each file name to the variableLoopFile=loopFile.'},
'general.gui': { 'name': 'general.gui', 'key': 'general.gui', 'description': 'The graphical user interface program groopsGui enables the convenient creation of GROOPS config files. It uses the for cross-platform support. The GUI depends on an XSD schema file containing the complete formal (computer readable) description of a GROOPS config file. This schema file can be created with the command: groops --xsd <groopsDir>/groops.xsd At least one schema file has to be set via the menu Settings - Default Paths and Files . Setting more than one schema files enables the schema selector in the toolbar. The selected schema will be used when (re-)opening or creating a config file. This feature is useful when working with different versions of GROOPS at the same time. It is possible to set a template file via the menu Settings - Default Paths and Files . This can be any GROOPS config file. Whenever a new config file is created via the GUI, all global elements and programs defined in the template file are automatically created in the new config file. It is highly recommended to create a template file containing at least the global element groopsDataDir of type filename . This element is used as a variable in most default file paths throughout many GROOPS programs. Thus, setting the path to the base directory containing all GROOPS data once in the template file, for example as groopsDataDir = /home/<user>/groops/data , is the most convenient way to handle default paths in GROOPS. The template file can also contain other often-used global elements, for example tmpDir or timeStart and timeEnd . A working directory can be set via Settings - Default Paths and Files . This directory is used as the default directory in the save dialog of new config files. The GUI offers the option to open the GROOPS documentation for a selected program. 
To use this feature, the GROOPS documentation must be generated (if not already present) with the command: groops --doc <groopsDir>/docs/ In the menu Settings - Default Paths and Files the path to the HTML version of the documentation must be set (i.e. <groopsDir>/docs/html ). Selecting any program and pressing F1 opens the documentation for this program in an external browser. Pressing F1 without having any program selected opens the main page of the GROOPS documentation. Executing a config file from the GUI requires the setup of a run command in the menu Settings - Commands . It is recommended for this command to open a new terminal in which GROOPS is executed with the config file given as an argument. The placeholders %w and %f are replaced by the directory and file name of the selected config file, respectively. Multiple commands can be set up, with the option to choose one of them in the run dialog. Example commands: Windows: cd /d %w && groops.exe %f Linux (KDE): konsole --workdir %w -e bash -ic "groops %f; bash" Linux (GNOME): gnome-terminal --working-directory=%w -x bash -ic "groops %f; bash" Windows, MPI with 4 processes: cd /d %w && mpiexec -n 4 groopsMPI.exe %f Linux (KDE), MPI with 4 processes: konsole --workdir %w -e bash -ic "mpiexec -n 4 groopsMPI %f; bash" Linux (GNOME), MPI with 4 processes: gnome-terminal --working-directory=%w -x bash -ic "mpiexec -n 4 groopsMPI %f; bash" Most basic features used to manipulate a config element are accessible via the context menu, for example attributing loops and conditions or setting an element global. Global elements automatically appear in the dropdown value list of config elements of the same type. Selecting a global element from the dropdown list as a value links this config element to the global element. In case the global element is removed, all linked elements\' values are replaced by the value of the deleted global element. 
The sidebar features three widgets: Open Files : An overview of all open config files (select to change current tree) Program List : A list of all programs defined in the schema of the active tree (filterable, supports drag and drop to tree, double click appends program) Undo Stack : Tracks all changes in a config file (select to change state of tree) In case the names of programs or config elements change over time, the GUI offers a rename feature to update outdated config files. The changes must be documented in the schema using GROOPS\' rename feature. Affected elements will be marked with an icon and the context menu item Update name will be available to change the element to the new name defined in the schema. Tree navigation: Enter : Switch focus from tree to input field of selected row Escape : Switch focus from input field back to tree Tab : Next sibling element (or next sibling of parent if there is no next sibling, or next child otherwise) Shift+Tab : Previous sibling element (or parent if there is no previous sibling) Ctrl+Tab : Next tab/tree Ctrl+Shift+Tab : Previous tab/tree Ctrl+Space : Interact with the element (e.g. filename/program: open dialog; time: switch focus between input fields) Ctrl+Up/Down : Next/previous sibling element Ctrl+Left/Right : Fold/expand (complex) element Tree manipulation: Ctrl+Shift+Up/Down : Move unbounded list element (e.g. program, layer) up/down Drag and Drop of tabs to other programs (i.e. text editors) or other GUI windows: Drag : Copy tab (= keep in source window) Shift+Drag : Move tab (= remove from source window) Drag and Drop GROOPS config file(s) into GUI: Drag : Open file(s) in new tab(s) Shift+Drag : Open file in current tab (replaces current tab, only works with a single file)', 'config_table': '', 'display_text': '
The graphical user interface program groopsGui enables the convenient creation of GROOPS config files. It uses the Qt5 framework for cross-platform support.
Settings and first-time setup
The GUI depends on an XSD schema file containing the complete formal (computer readable) description of a GROOPS config file. This schema file can be created with the command:
groops --xsd <groopsDir>/groops.xsd
At least one schema file has to be set via the menu Settings - Default Paths and Files. Setting more than one schema files enables the schema selector in the toolbar. The selected schema will be used when (re-)opening or creating a config file. This feature is useful when working with different versions of GROOPS at the same time.
It is possible to set a template file via the menu Settings - Default Paths and Files. This can be any GROOPS config file. Whenever a new config file is created via the GUI, all global elements and programs defined in the template file are automatically created in the new config file. It is highly recommended to create a template file containing at least the global element groopsDataDir of type filename. This element is used as a variable in most default file paths throughout many GROOPS programs. Thus, setting the path to the base directory containing all GROOPS data once in the template file, for example as groopsDataDir=/home/<user>/groops/data, is the most convenient way to handle default paths in GROOPS. The template file can also contain other often-used global elements, for example tmpDir or timeStart and timeEnd.
A working directory can be set via Settings - Default Paths and Files. This directory is used as the default directory in the save dialog of new config files.
The GUI offers the option to open the GROOPS documentation for a selected program. To use this feature, the GROOPS documentation must be generated (if not already present) with the command:
groops --doc <groopsDir>/docs/
In the menu Settings - Default Paths and Files the path to the HTML version of the documentation must be set (i.e. <groopsDir>/docs/html). Selecting any program and pressing F1 opens the documentation for this program in an external browser. Pressing F1 without having any program selected opens the main page of the GROOPS documentation.
Executing a config file from the GUI requires the setup of a run command in the menu Settings - Commands. It is recommended for this command to open a new terminal in which GROOPS is executed with the config file given as an argument. The placeholders %w and %f are replaced by the directory and file name of the selected config file, respectively. Multiple commands can be set up, with the option to choose one of them in the run dialog.
Example commands:
Windows: cd /d %w && groops.exe %f
Linux (KDE): konsole --workdir %w -e bash -ic "groops %f; bash"
Linux (GNOME): gnome-terminal --working-directory=%w -x bash -ic "groops %f; bash"
Windows, MPI with 4 processes: cd /d %w && mpiexec -n 4 groopsMPI.exe %f
Linux (KDE), MPI with 4 processes: konsole --workdir %w -e bash -ic "mpiexec -n 4 groopsMPI %f; bash"
Linux (GNOME), MPI with 4 processes: gnome-terminal --working-directory=%w -x bash -ic "mpiexec -n 4 groopsMPI %f; bash"
Basic features
Most basic features used to manipulate a config element are accessible via the context menu, for example attributing loops and conditions or setting an element global. Global elements automatically appear in the dropdown value list of config elements of the same type. Selecting a global element from the dropdown list as a value links this config element to the global element. In case the global element is removed, all linked elements\' values are replaced by the value of the deleted global element.
The sidebar features three widgets:
Open Files: An overview of all open config files (select to change current tree)
Program List: A list of all programs defined in the schema of the active tree (filterable, supports drag and drop to tree, double click appends program)
Undo Stack: Tracks all changes in a config file (select to change state of tree)
In case the names of programs or config elements change over time, the GUI offers a rename feature to update outdated config files. The changes must be documented in the schema using GROOPS\' rename feature. Affected elements will be marked with an icon and the context menu item Update name will be available to change the element to the new name defined in the schema.
Additional keyboard shortcuts
Tree navigation:
Enter: Switch focus from tree to input field of selected row
Escape: Switch focus from input field back to tree
Tab: Next sibling element (or next sibling of parent if there is no next sibling, or next child otherwise)
Shift+Tab: Previous sibling element (or parent if there is no previous sibling)
Ctrl+Tab: Next tab/tree
Ctrl+Shift+Tab: Previous tab/tree
Ctrl+Space: Interact with the element (e.g. filename/program: open dialog; time: switch focus between input fields)
Ctrl+Up/Down: Next/previous sibling element
Ctrl+Left/Right: Fold/expand (complex) element
Tree manipulation:
Ctrl+Shift+Up/Down: Move unbounded list element (e.g. program, layer) up/down
Drag and Drop of tabs to other programs (i.e. text editors) or other GUI windows:
Drag: Copy tab (= keep in source window)
Shift+Drag: Move tab (= remove from source window)
Drag and Drop GROOPS config file(s) into GUI:
Drag: Open file(s) in new tab(s)
Shift+Drag: Open file in current tab (replaces current tab, only works with a single file)
'},
'general.constants': { 'name': 'general.constants', 'key': 'general.constants', 'description': 'GROOPS uses some built-in constants like DEFAULT_GM or the definition of leap seconds, which are defined in source/base/constants.cpp . A complete list of the constants can be written to an XML file with: groops --write-settings <groopsDefaults.xml> The built-in constants can be overwritten by a groopsDefaults.xml file in the working directory or by explicitly passing the file as an argument at execution: groops --settings <groopsDefaults.xml> <config.xml> It might also be useful to adjust the default values in the schema file used by the GUI : groops --settings <groopsDefaults.xml> --xsd <groops.xsd> Example file: <?xml version="1.0" encoding="UTF-8"?> <groops> <LIGHT_VELOCITY>299792458</LIGHT_VELOCITY> <DEFAULT_GM>3.986004415e+14</DEFAULT_GM> <DEFAULT_R>6378136.3</DEFAULT_R> <GRS80_a>6378137.0</GRS80_a> <GRS80_f>298.2572221010</GRS80_f> <GRAVITATIONALCONSTANT>6.6730e-11</GRAVITATIONALCONSTANT> <R_Earth>6.37813630000000e+06</R_Earth> <R_Moon>1.73800000000000e+06</R_Moon> <GM_Earth>3.98600441500000e+14</GM_Earth> <GM_Sun>1.32712442076000e+20</GM_Sun> <GM_Moon>4.90280105600000e+12</GM_Moon> <GM_MERCURY>2.20320808280762e+13</GM_MERCURY> <GM_VENUS>3.24858603864143e+14</GM_VENUS> <GM_MARS>4.28283149222192e+13</GM_MARS> <GM_JUPITER>1.26712769822770e+17</GM_JUPITER> <GM_SATURN>3.79406266494906e+16</GM_SATURN> <TIME_EPSILON>1.0e-05</TIME_EPSILON> <DELTA_TAI_GPS>19</DELTA_TAI_GPS> <DELTA_TT_GPS>51.184</DELTA_TT_GPS> <J2000>51544.5</J2000> <leapSecond> <MJD>57754</MJD> <DELTA_UTC_GPS>-18</DELTA_UTC_GPS> </leapSecond> <leapSecond> <MJD>57204</MJD> <DELTA_UTC_GPS>-17</DELTA_UTC_GPS> </leapSecond> <leapSecond> <MJD>56109</MJD> <DELTA_UTC_GPS>-16</DELTA_UTC_GPS> </leapSecond> ... 
<leapSecond> <MJD>41317</MJD> <DELTA_UTC_GPS>9</DELTA_UTC_GPS> </leapSecond> <leapSecond> <MJD>0</MJD> <DELTA_UTC_GPS>10</DELTA_UTC_GPS> </leapSecond> </groops>', 'config_table': '', 'display_text': 'GROOPS uses some built-in constants like DEFAULT_GM or the definition of leap seconds, which are defined in source/base/constants.cpp.
A complete list of the constants can be written to an XML file with:
groops --write-settings <groopsDefaults.xml>
The built-in constants can be overwritten by a groopsDefaults.xml file in the working directory or by explicitly passing the file as an argument at execution:
'},
'general.parallelization': { 'name': 'general.parallelization', 'key': 'general.parallelization', 'description': 'If GROOPS is compiled with the Message Passing Interface (MPI), most GROOPS programs can be run in parallel on multiple processor cores. Processing on computer clusters with distributed memory is also supported. Many loops are parallelized by computing each loop step at a different core. Usually the first node distributes the work load, assigns loop steps to different cores, and is not participating on the actual loop computation. This means running GROOPS with only two nodes has no advantages in almost all cases. Non-parallel parts and programs without parallel support are executed at the first node only. Large systems of normal equations , which are divided into blocks, are distributed over the nodes to reduce the memory consumption on each single node. As all nodes may read and write files (at least reading the config files ) the required part of the file system must be available on all participating computers.', 'config_table': '', 'display_text': 'If GROOPS is compiled with the Message Passing Interface (MPI), most GROOPS programs can be run in parallel on multiple processor cores. Processing on computer clusters with distributed memory is also supported.
Many loops are parallelized by computing each loop step at a different core. Usually the first node distributes the work load, assigns loop steps to different cores, and is not participating on the actual loop computation. This means running GROOPS with only two nodes has no advantages in almost all cases. Non-parallel parts and programs without parallel support are executed at the first node only.
Large systems of normal equations, which are divided into blocks, are distributed over the nodes to reduce the memory consumption on each single node.
As all nodes may read and write files (at least reading the config files) the required part of the file system must be available on all participating computers.'},
'fundamentals.robustLeastSquares': { 'name': 'fundamentals.robustLeastSquares', 'key': 'fundamentals.robustLeastSquares', 'description': 'The robust least squares adjustment used in GROOPS is based on a modified Huber estimator. It downweights observations with large outliers iteratively. The algorithm starts with a first solution with equal weights The solution is used to compute the residuals and the redundancies of all observations For observations with large residuals a new standard deviation is assigned where is huber , is huberPower , and a robust overall variance factor computed from all residuals. The estimation is repeated huberMaxIteration times with a new weight matrix or until convergence is reached.', 'config_table': '', 'display_text': 'The robust least squares adjustment used in GROOPS is based on a modified Huber estimator. It downweights observations with large outliers iteratively.
The algorithm starts with a first solution with equal weights $\\M P =\\M I$ \\[ \\hat{\\M x} = (\\M A^T\\M P\\M A)^{-1}\\M A^T\\M P\\M l. \\]The solution is used to compute the residuals \\[ \\hat{e}_i = \\left(\\M l - \\M A \\hat{\\M x}\\right)_i \\]and the redundancies of all observations \\[ r_i = \\left(\\M I - \\M A(\\M A^T\\M P\\M A)^{-1}\\M A^T\\M P\\right)_{ii}. \\]For observations with large residuals a new standard deviation is assigned \\[ \\sigma_i = \\begin{cases} 1 & \\text{for } \\left|\\frac{\\hat{e}_i}{r_i}\\right| \\le h\\cdot\\hat{\\sigma} \\\\ \\left|\\frac{\\hat{e}_i}{r_ih}\\right|^p & \\text{for } \\left|\\frac{\\hat{e}_i}{r_i}\\right| > h\\cdot\\hat{\\sigma}, \\end{cases} \\]where $h$ is huber, $p$ is huberPower, and $\\hat{\\sigma}^2$ a robust overall variance factor computed from all residuals. The estimation is repeated huberMaxIteration times with a new weight matrix \\[ \\M P = \\text{diag}\\left(\\frac{1}{\\sigma_1^2}, \\frac{1}{\\sigma_2^2}, \\ldots, \\frac{1}{\\sigma_n^2}\\right) \\] or until convergence is reached.'},
'fundamentals.basisSplines': { 'name': 'fundamentals.basisSplines', 'key': 'fundamentals.basisSplines', 'description': 'A time variable function is given by with the (spatial) coefficients as parameters and the temporal basis functions . Basis splines are defined as polynomials of degree in intervals between nodal points in time : Block mean values ( ) Linear splines ( ) Quadratic splines ( ) Cubic splines ( ) where is the normalized time in each time interval The total number of coefficients is , where is the count of nodal time points and is the degree.', 'config_table': '', 'display_text': 'A time variable function is given by \\[ f(x,t) = \\sum_i f_i(x)\\Psi_i(t), \\]with the (spatial) coefficients $f_i(x)$ as parameters and the temporal basis functions $\\Psi_i(t)$. Basis splines are defined as polynomials of degree $n$ in intervals between nodal points in time $t_i$:
where $\\tau$ is the normalized time in each time interval \\[ \\tau_i = \\frac{t-t_i}{t_{i+1}-t_i}. \\]The total number of coefficients $f_i(x)$ is $N=N_t+n-1$, where $N_t$ is the count of nodal time points $t_i$ and $n$ is the degree.
'},
'fundamentals.autoregressiveModel': { 'name': 'fundamentals.autoregressiveModel', 'key': 'fundamentals.autoregressiveModel', 'description': 'A multivariate (or vector) autoregressive model is one possible representation of a random process. It specifies that the output at epoch depends on the previous epochs, where is denoted process order, plus a stochastic term. In the following, finite order vector autoregressive - VAR( ) in short - models as implemented in GROOPS will be described. A finite order VAR( ) model is defined as where are realizations of a random vector process Subtracting the right hand side and substituting the stochastic term with the residual gives us which can be used as pseudo-observation equations in the determination of the parameters . In matrix notation this reads After rearranging the vectors to have ascending time stamps For practical purposes, the residuals above are further decorrelated using the inverse square root of the white noise covariance matrix, leading to The used square root is in principle arbitrary, but should satisfy . This means that both eigendecomposition based roots and Cholesky factors can be used. After applying the matrix from the left, we arrive at the observation equations which yields fully decorrelated residuals. Currently, VAR( ) models are saved to a single file which contains this matrix.', 'config_table': '', 'display_text': 'A multivariate (or vector) autoregressive model is one possible representation of a random process. It specifies that the output at epoch $t$ depends on the $p$ previous epochs, where $p$ is denoted process order, plus a stochastic term. In the following, finite order vector autoregressive - VAR($p$) in short - models as implemented in GROOPS will be described.
Definition
A finite order VAR($p$) model is defined as \\[ \\mathbf{y}_e(t_i) = \\sum_{k=1}^p \\mathbf{\\Phi}^{(p)}_k\\mathbf{y}_e(t_{i-k}) + \\mathbf{w}(t_i), \\hspace{5pt} \\mathbf{w}(t_i) \\sim \\mathcal{N}(0, \\mathbf{\\Sigma}^{(p)}_\\mathbf{w}), \\]where $\\mathbf{y}_e(t_i)$ are realizations of a random vector process Subtracting the right hand side and substituting the stochastic term $-\\mathbf{w}(t_i)$ with the residual $\\mathbf{v}(t_i)$ gives us \\[ \\mathbf{0} = \\mathbf{y}_e(t_i) - \\sum_{k=1}^p \\mathbf{\\Phi}^{(p)}_k\\mathbf{y}_e(t_{i-k}) + \\mathbf{v}(t_i) \\]which can be used as pseudo-observation equations in the determination of the parameters $\\mathbf{y}_e(t_i)$. In matrix notation this reads \\[ 0 = \\begin{bmatrix} \\mathbf{I} & -\\mathbf{\\Phi}^{(p)}_1 & \\cdots & -\\mathbf{\\Phi}^{(p)}_p \\\\ \\end{bmatrix} \\begin{bmatrix} \\mathbf{y}_e(t_i) \\\\ \\mathbf{y}_e(t_{i-1}) \\\\ \\vdots \\\\ \\mathbf{y}_e(t_{i-p}) \\\\ \\end{bmatrix} + \\mathbf{v}(t_i). \\]After rearranging the vectors $\\mathbf{x}_t$ to have ascending time stamps \\[ 0 = \\begin{bmatrix} -\\mathbf{\\Phi}^{(p)}_p & \\cdots & -\\mathbf{\\Phi}^{(p)}_1 & \\mathbf{I} \\\\ \\end{bmatrix} \\begin{bmatrix} \\mathbf{y}_e(t_{i-p}) \\\\ \\vdots \\\\ \\mathbf{y}_e(t_{i-1}) \\\\ \\mathbf{y}_e(t_i) \\\\ \\end{bmatrix} + \\mathbf{v}(t_i) \\]For practical purposes, the residuals above are further decorrelated using the inverse square root of the white noise covariance matrix, leading to \\[ \\bar{\\mathbf{v}}(t_i) = \\underbrace{\\mathbf{\\Sigma}^{(p)^{-\\frac{1}{2}}}_\\mathbf{w}}_{=\\mathbf{W}}\\mathbf{v}(t_i), \\hspace{25pt} \\bar{\\mathbf{v}}(t_i) \\sim \\mathcal{N}(0, \\mathbf{I}). \\]The used square root is in principle arbitrary, but should satisfy $\\mathbf{W}^T\\mathbf{W} = \\mathbf{\\Sigma}^{(p)}_\\mathbf{w} $. This means that both eigendecomposition based roots and Cholesky factors can be used. 
After applying the matrix from the left, we arrive at the observation equations \\[ 0 = \\begin{bmatrix} -\\mathbf{W}\\mathbf{\\Phi}^{(p)}_p & \\cdots & -\\mathbf{W}\\mathbf{\\Phi}^{(p)}_1 & \\mathbf{W} \\\\ \\end{bmatrix} \\begin{bmatrix} \\mathbf{y}_e(t_{i-p}) \\\\ \\vdots \\\\ \\mathbf{y}_e(t_{i-1}) \\\\ \\mathbf{y}_e(t_i) \\\\ \\end{bmatrix} + \\bar{\\mathbf{v}}(t_i) \\]which yields fully decorrelated residuals. Currently, VAR($p$) models are saved to a single file which contains this matrix.'},
'cookbook.instrument': { 'name': 'cookbook.instrument', 'key': 'cookbook.instrument', 'description': 'GROOPS provides functions and programs to read/write, preprocess, analyze and visualize uniformly and non-uniformly sampled instrument data. This includes tools for filter design and analysis, re-sampling, smoothing, detrending, and power spectrum estimation. This tutorial goes through exemplary steps for data handling procedures. GROOPS is able to read and convert relevant data from various LEO and GNSS satellites. Instrument files need to be converted into the respective GROOPS format using conversion programs.Depending on the content of the input file, the data is stored with a specific section . User also has the option to change the type later on with InstrumentSetType . Multiple files can be concatenated to one file using InstrumentConcatenate . Using this program, it is also possible to sort the epochs, remove the duplicates and NaN values. Example: Concatenating instrument files Create three successively daily sinusoidal signals with TimeSeriesCreate and set their type to MISCVALUE with InstrumentSetType . In this example, each data set has an overlap of 1 hour with their following dataset. Merge all datasets to one single file with InstrumentConcatenate . Many measurements involve data collected asynchronously by multiple sensors with different sampling. Use InstrumentSynchronize for a continuous harmonization of the data over time or segmentation of the data into arcs. Real-world data is often incomplete, inconsistent, and/or lacking in certain behaviors or trends, and is likely to contain many errors. Data preprocessing is a proven method of resolving such issues. Following steps are usually required to be taken: Gross outlier removal: Create reference values to compare the input data with. Depending on the instrument type, this can be done by simulation programs such as SimulateAccelerometer or SimulateStarCamera . 
If no reference data is available, the outlier detection is based on the data itself. If needed, synchronize the reference data file and the input data with InstrumentSynchronize . In case of star camera data, compute the differences between the input data and the reference data with InstrumentStarCameraMultiply . Set a threshold for outlier detection in InstrumentRemoveEpochsByCriteria . The threshold is defined empirically according to the accuracy characteristics of each data products. If the differences exceed a predefined threshold, the corresponding epochs are removed. An arbitrary margin can be defined to additionally remove epochs before and after the identified outliers. It is also possible to remove epochs at specific times using InstrumentRemoveEpochsByTimes . Missing epochs can be filled by reference data with InstrumentConcatenate . It is also possible to interpolate the missing epochs with InstrumentResample . Example: Removing outliers in a synthetic data. Create a sinusoidal signal with an amplitude of 1.0 using TimeSeriesCreate and set its type to MISCVALUE with InstrumentSetType . Add zero-mean, white Gaussian noise with a standard deviation of 0.1 with NoiseInstrument . Interpret this data as a real measurement file. Set the threshold criteria to 0.2 in InstrumentRemoveEpochsByCriteria and remove the outliers and their nearest epochs in 20 second interval. Fill the data gaps with InstrumentResample . Downsampling: If the sampling is irregular use InstrumentResample to make the sampling equidistant. Use InstrumentSynchronize to divide the data at gaps into arcs. Apply a lowpass filter (e.g. Butterworth) with the Nyquist frequency of the target sampling as cutoff with InstrumentFilter . Apply the filter in both directions to avoid phase shifts. Use InstrumentReduceSampling to down-sample the data. Calibration: For a general instrument file, InstrumentDetrend subtracts offsets or linear/nonlinear trends from the input data. 
This can be achieved also with FunctionsCalculate or InstrumentArcCalculate by applying determined calibration factors or solving a least-square adjustment. For accelerometer data, InstrumentAccelerometerEstimateBiasScale is designed to estimate and subtract complex biases or scales with respect to simulated accelerometer data. If a thruster file is given, the corresponding epochs are eliminated during estimation process. Example: GRACE-C accelerometer calibration For one particular date, read and convert Level-1B GRACE-C orbit, star camera, accelerometer, and thruster data with GraceL1b2Orbit , GraceL1b2StarCamera , GraceL1b2Accelerometer , and GraceL1b2Thruster respectively. It is also required to read the macro-model data of the satellite using the related information in the official document and convert it to GROOPS format with SatelliteModelCreate . Use SimulateAccelerometer to generate simulated accelerations due to non-gravitational force models including: section , section , and section . Calibrate the real measurements with a daily constant accelerometer bias by choosing a constant parameter per axis in section . InstrumentStatisticsTimeSeries returns statistics for one or more instrument files. InstrumentArcCalculate is also able to generate a statistics file with one mid epoch per arc. Spectral analysis studies the frequency spectrum contained in discrete, uniformly sampled data. The Fourier transform is a tool that reveals frequency components of a signal by representing it in frequency space. The Power Spectral Density (PSD) is a measurement of the energy at each frequency. If the sampling is irregular use InstrumentResample to make the sampling equidistant. Use Instrument2PowerSpectralDensity to compute PSD. If covariance function of a dataset is available, use CovarianceFunction2PowerSpectralDensity . Example: Spectral analysis of a synthetic signal. 
Create a sinusoidal signal with an amplitude of 1.0 using TimeSeriesCreate and set its type to MISCVALUE with InstrumentSetType . Interpret this data as a simulation data file. Add zero-mean, white Gaussian noise with a standard deviation of 0.1 with NoiseInstrument . Interpret this data as a real measurement file. Compute PSD of the simulated and measurement data and represent the results with PlotGraph . Argument of latitude plot Plotting instrument data as a function of satellite position in orbit and time reveals features related to the orbit geometry or environmental conditions. For circular orbits, the position of satellite can be specified by the argument of latitude. Synchronize the instrument data file with the related orbit data using InstrumentSynchronize . Use Orbit2ArgumentOfLatitude to compute argument of latitude at each epoch. Plot the instrument data versus argument of latitude and time with PlotGraph . Example: Argument of latitude representation of GRACE-C eclipse factors Compute eclipse factors at each epoch of GRACE-C orbit at an arbitrary time using Orbit2EclipseFactor . Synchronize the eclipse factor data file with the related orbit data using InstrumentSynchronize . Use Orbit2ArgumentOfLatitude to compute argument of latitude at each epoch and visualize the results with PlotGraph . Ground-track plot Plotting instrument data with respect to the satellite ground track is useful to identify any features of geophysical origin in the data. Synchronize the instrument data file with the related orbit data using InstrumentSynchronize . Use Orbit2Groundtracks to map instrument data to satellite ground-track. Visualize the output with PlotMap . Example 6: Ground-track representation of GRACE-C eclipse factors Compute eclipse factors at each epoch of GRACE-C orbit at an arbitrary time using Orbit2EclipseFactor . Synchronize the eclipse factor data file with the related orbit data using InstrumentSynchronize . 
Use Orbit2Groundtracks to generate the gridded data. Each grid value represents the mean value of eclipse factor over the instrument time period (1 month). Visualize the results with PlotMap .', 'config_table': '', 'display_text': '
GROOPS provides functions and programs to read/write, preprocess, analyze and visualize uniformly and non-uniformly sampled instrument data. This includes tools for filter design and analysis, re-sampling, smoothing, detrending, and power spectrum estimation. This tutorial goes through exemplary steps for data handling procedures.
Reading data
GROOPS is able to read and convert relevant data from various LEO and GNSS satellites. Instrument files need to be converted into the respective GROOPS format using conversion programs. Depending on the content of the input file, the data is stored with a specific instrument type. The user also has the option to change the type later on with InstrumentSetType.
Multiple files can be concatenated to one file using InstrumentConcatenate. Using this program, it is also possible to sort the epochs, remove the duplicates and NaN values.
Example: Concatenating instrument files
Create three successively daily sinusoidal signals with TimeSeriesCreate and set their type to MISCVALUE with InstrumentSetType. In this example, each data set has an overlap of 1 hour with their following dataset.
Many measurements involve data collected asynchronously by multiple sensors with different sampling. Use InstrumentSynchronize for a continuous harmonization of the data over time or segmentation of the data into arcs.
Preprocessing
Real-world data is often incomplete, inconsistent, and/or lacking in certain behaviors or trends, and is likely to contain many errors. Data preprocessing is a proven method of resolving such issues. Following steps are usually required to be taken:
Gross outlier removal:
Create reference values to compare the input data with. Depending on the instrument type, this can be done by simulation programs such as SimulateAccelerometer or SimulateStarCamera. If no reference data is available, the outlier detection is based on the data itself. If needed, synchronize the reference data file and the input data with InstrumentSynchronize.
In case of star camera data, compute the differences between the input data and the reference data with InstrumentStarCameraMultiply.
Set a threshold for outlier detection in InstrumentRemoveEpochsByCriteria. The threshold is defined empirically according to the accuracy characteristics of each data product. If the differences exceed a predefined threshold, the corresponding epochs are removed. An arbitrary margin can be defined to additionally remove epochs before and after the identified outliers. It is also possible to remove epochs at specific times using InstrumentRemoveEpochsByTimes.
Apply a lowpass filter (e.g. Butterworth) with the Nyquist frequency of the target sampling as cutoff with InstrumentFilter. Apply the filter in both directions to avoid phase shifts.
For a general instrument file, InstrumentDetrend subtracts offsets or linear/nonlinear trends from the input data. This can also be achieved with FunctionsCalculate or InstrumentArcCalculate by applying determined calibration factors or solving a least-squares adjustment.
For accelerometer data, InstrumentAccelerometerEstimateBiasScale is designed to estimate and subtract complex biases or scales with respect to simulated accelerometer data. If a thruster file is given, the corresponding epochs are eliminated during estimation process.
Example: GRACE-C accelerometer calibration
For one particular date, read and convert Level-1B GRACE-C orbit, star camera, accelerometer, and thruster data with GraceL1b2Orbit, GraceL1b2StarCamera, GraceL1b2Accelerometer, and GraceL1b2Thruster respectively. It is also required to read the macro-model data of the satellite using the related information in the official document and convert it to GROOPS format with SatelliteModelCreate.
Calibrate the real measurements with a daily constant accelerometer bias by choosing a constant parameter per axis in parametrizationAcceleration:accBias.
Spectral analysis studies the frequency spectrum contained in discrete, uniformly sampled data. The Fourier transform is a tool that reveals frequency components of a signal by representing it in frequency space. The Power Spectral Density (PSD) is a measurement of the energy at each frequency.
If the sampling is irregular use InstrumentResample to make the sampling equidistant.
Create a sinusoidal signal with an amplitude of 1.0 using TimeSeriesCreate and set its type to MISCVALUE with InstrumentSetType. Interpret this data as a simulation data file.
Add zero-mean, white Gaussian noise with a standard deviation of 0.1 with NoiseInstrument. Interpret this data as a real measurement file.
Compute PSD of the simulated and measurement data and represent the results with PlotGraph.
Data visualization
Argument of latitude plot Plotting instrument data as a function of satellite position in orbit and time reveals features related to the orbit geometry or environmental conditions. For circular orbits, the position of satellite can be specified by the argument of latitude.
Synchronize the instrument data file with the related orbit data using InstrumentSynchronize.
Ground-track plot Plotting instrument data with respect to the satellite ground track is useful to identify any features of geophysical origin in the data.
Synchronize the instrument data file with the related orbit data using InstrumentSynchronize.
Use Orbit2Groundtracks to map instrument data to satellite ground-track.
Example 6: Ground-track representation of GRACE-C eclipse factors
Compute eclipse factors at each epoch of GRACE-C orbit at an arbitrary time using Orbit2EclipseFactor.
Synchronize the eclipse factor data file with the related orbit data using InstrumentSynchronize.
Use Orbit2Groundtracks to generate the gridded data. Each grid value represents the mean value of eclipse factor over the instrument time period (1 month). Visualize the results with PlotMap.
'},
'cookbook.gnssNetwork': { 'name': 'cookbook.gnssNetwork', 'key': 'cookbook.gnssNetwork', 'description': 'This cookbook chapter describes an example of global GNSS processing as done by analysis centers of the International GNSS Service (IGS). Resulting products usually comprise: Satellite orbits, clocks, and signal biases Station positions, clocks, signal biases, and troposphere estimates Earth orientation parameters Scientific details about the underlying processing approach and the applied parametrizations, models, and corrections can be found in a doctoral thesis available under DOI . An example scenario for this task is available at https://ftp.tugraz.at/pub/ITSG/groops/scenario/scenarioGnssNetwork.zip . It includes GROOPS scripts and data for the example, but not the general GROOPS data and metadata found at https://ftp.tugraz.at/pub/ITSG/groops (data folder or zipped archive). The scenario generally represents what is described in this cookbook, but may slightly differ in certain settings. Note: Global GNSS processing can become very computationally intensive. Depending on the number of satellites and stations, the observation and processing sampling, and parametrizations it can quickly exceed the capabilities of a normal desktop computer and may require computer clusters or number crunchers (see section Parallelization ). Most of the required metadata files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops . These files are regularly updated. Data that has to be gathered from other sources comprises: Receiver observations : GNSS measurements converted from RINEX format (see RinexObservation2GnssReceiver ). Approximate orbits : broadcast or precise orbits in CRF for orbit integration (see GnssRinexNavigation2OrbitClock or Sp3Format2Orbit ). 
Approximate clocks : broadcast or precise clocks (see GnssRinexNavigation2OrbitClock or GnssClockRinex2InstrumentClock ) Receiver observations, broadcast ephemerides, and precise satellite orbits and clocks can be downloaded from the . GPS, GLONASS, and Galileo orbits and clocks for the period 1994-2020 are also available as part of . The includes a small set of this data. The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats. Prepare a station list file that contains the stations to be processed. Each line can contain more than one station. The first station in each line that has data available is used for the processing. If your network contains more than 60-70 stations, it is recommended to start processing with a core network (see Advanced ). In this case, define an additional core station list file that can also have multiple stations per line. Numerical integration of the satellite orbits is the first step in global GNSS processing. Dynamic orbits are integrated based on section and then fitted to the approximate orbits by estimating their initial state and additional empirical parameters for solar radiation pressure to improve the orbit fit. The resulting variational equations file contains the integrated orbit, derivatives with respect to the satellite state vector, attitude, Earth rotation and satellite model. Orbit preprocessing is covered by the script 020groopsGnssPreprocessing.xml in the . It is recommended to perform the steps below in a loop over all satellites/PRNs using LoopPrograms . To get the relation between {prn} and {svn} setup an additional section inside section with section : the old inputfileTransmitterInfo equipmentType = gnssAntenna variableLoopName = block variableLoopSerial = svn variableLoopTimeStart = svnTimeStart variableLoopTimeEnd = svnTimeEnd section expression = (svnTimeStart <= loopTime) && (loopTime < svnTimeEnd) This second loop should perform only one step. 
The following programs are looped over all {prn} : InstrumentResample : resample approximate orbits from data preparation to target sampling (e.g., 1 minute) by defining a section based on a method:polynomial ( polynomialDegree = 7 , maxDataPointRange = 7200 , maxExtrapolationDistance = 900 ). OrbitAddVelocityAndAcceleration : add velocity via running polynomial ( polynomialDegree = 2 ) derivation (needed for attitude computation) SimulateStarCameraGnss PreprocessingVariationalEquation : section = {groopsDataDir}/gnss/transmitter/satelliteModel/satelliteModel_boxWing.{svn}.xml section : the resampled approximate orbit from InstrumentResample section : the attitude file from SimulateStarCameraGnss forces : see below section : a static gravity field (e.g. GOCO06s) with maxDegree = 4 . PreprocessingVariationalEquationOrbitFit : fit the integrated orbit ( section ) to the approximate orbit ( section ) by least squares adjustment. Add section and select the parameters to be estimated. Force models usually include: section : static gravity field (e.g. GOCO06s) section section : trend component of time-variable gravity field (e.g. GOCO06s) section section : annual cosine component of time-variable gravity field (e.g. GOCO06s) section : annual sine component of time-variable gravity field (e.g. GOCO06s) section : astronomical tides (e.g. based on JPL ephemeris) section : Earth tide (IERS conventions) section : ocean tides (e.g. FES 2014b) section : pole tides (IERS conventions) section : ocean pole tides (IERS conventions) section : solar radiation pressure (box-wing model) section : Earth radiation pressure (albedo model) section : antenna thrust (e.g. from IGS metadata SINEX file) section : relativistic effects (IERS conventions) For the spherical harmonics expansions a maxDegree = 60 is more than enough. 
The result of the preprocessing should be a variational equations file , a reduced dynamic orbit file from PreprocessingVariationalEquationOrbitFit and an attitude file from SimulateStarCameraGnss for each satellite. The script 030groopsGnssProcessing.xml in the implements the following steps and settings. These are the settings for GnssProcessing . If not otherwise stated use the default values. The first step is setting the processing sampling, in this example it is 30 seconds. The processing interval usually is a single 24-hour day, therefore define section with timeStart = <mjd> , timeEnd = <mjd>+1 , sampling = 30/86400 (processing sampling). Add the appropriate section (e.g. GPS, GLONASS, and Galileo) and provide the required files: section from preprocessing section from preprocessing section from data preparation The following settings are needed in section : section : list of all stations to be processed section : The converted RINEX observation files. section : Use the settings described in receiver:stationNetwork . section : Signals you might want to exclude are C*?G (old unknown GPS code observations), *3*R (GLONASS G3 freq.), *6*E (Galileo E6 freq.). Add the following section and define the outputfiles you are interested in inside each of them: section : add a constraint of sigmaSTEC = 40 section : add section with linear ( degree = 1 ) 2-hourly splines section : optionally change section to section with name = G* to align clocks to mean over GPS (instead of all) satellites section section section section section : time-variable GPS L5 phase bias with section = L5*G and section with degree 3 and hourly nodes. section section : select section with the appropriate vmf3grid file. Add section with linear ( degree = 1 ) 2-hourly splines and section with linear daily splines. section : provide section = preprocessing/variational.{prn}.dat from the preprocessing step. Add section and section parameter at center of day to further improve orbit fit. 
section section : polar motion section polar motion rate (at center of day with timeStep = 1 ) section : length of day (at center of day with timeStep = -1 to match IGS sign convention) section : loose constraint on GPS L5 phase biases, section = signalBias.L5* , sigma = 5 meters, and relativeToApriori = yes section : loose constraint troposphere estimates, section = troposphere* , sigma = 5 meters, and relativeToApriori = yes section : constraint on stochastic pulses, section = stochasticPulse* , sigma = 0.1 micrometers/second. Finally, define the section . This can be overwhelming at first, but offers a lot of flexibility. The example script uses a 5-minute processing sampling with subsequent clock densification to 30 seconds. section : with nthEpoch = 10 to reduce sampling to 5 minutes. section : disable constraint.STEC , *VTEC , *.tecBiases as the ionosphere parameters are estimated in the final steps only. section : with maxIterationCount = 6 section section : enable * (all) parameters section : with maxIterationCount = 4 : final iterations (with 5-minute sampling) and ionosphere parameters section : clock densification to 30-second sampling section : with nthEpoch = 1 to set full 30-second sampling section : disable * (all) parameters and reenable *.clock* and *.STEC parameters section : with maxIterationCount = 6 section : with suffix = 30s to write 30-second clock files section : write the final results With some additional steps, the full 30-second sampling can be used to estimate all parameters (not only the clocks). These steps are disabled in the example script, as they require at least 16 GB of system memory. In this case, it is not necessary to separately write the 30-second clock files as listed above. section : with nthEpoch = 1 to set full 30-second sampling section : As the system of normal equations can be very large, the memory consumption might be reduced with keepEpochNormalsinMemory = no . 
In this case the epoch parameters are directly eliminated during the accumulation and reconstructed in the solving step. This might lead to longer computation times. section : with maxIterationCount = 2 : final iterations with full sampling Processing large station networks requires some additional steps to keep the computational load to a reasonable degree. The general processing strategy is to first process a well-distributed subset of stations (i.e. a core network) to get good estimates of all satellite parameters, which then enables integer ambiguity resolution (IAR). Once the ambiguities of the core network are resolved and stable estimates for satellite phase biases are available, all other (non-core) stations can be processed individually (including IAR) while keeping the satellite parameters fixed. At last, all stations can be processed together with all satellite parameters and ionosphere parameters. Let\'s start with the section of the core network: section with section using the core network station list file from data preparation as section . section : with nthEpoch = 10 to reduce sampling to 5 minutes. section : disable constraint.STEC , *VTEC , *.tecBiases as the ionosphere parameters are estimated in the final steps only. 
section : with maxIterationCount = 6 section section : with maxIterationCount = 4 : final iterations (with 5-minute sampling) Now all other (non-core) stations can be processed separately: section : with section inside section using the station list from the core network above to process all non-core stations individually with fixed transmitter parameters section : with maxIterationCount = 6 section section : with maxIterationCount = 4 Next all stations are processed together with all parameters: section : with section section : with nthEpoch = 1 to set full 30-second sampling section : clock densification to 30-second sampling section : disable * (all) parameters and reenable *.clock* and *.STEC parameters section : with maxIterationCount = 6 section : enable * (all) parameters. section : with keepEpochNormalsinMemory = no section : with maxIterationCount = 4 : final iterations with full sampling and all parameters section : write the final results', 'config_table': '', 'display_text': 'This cookbook chapter describes an example of global GNSS processing as done by analysis centers of the International GNSS Service (IGS). Resulting products usually comprise:
Satellite orbits, clocks, and signal biases
Station positions, clocks, signal biases, and troposphere estimates
Earth orientation parameters
Scientific details about the underlying processing approach and the applied parametrizations, models, and corrections can be found in a doctoral thesis available under DOI 10.3217/978-3-85125-885-1.
Note: Global GNSS processing can become very computationally intensive. Depending on the number of satellites and stations, the observation and processing sampling, and parametrizations it can quickly exceed the capabilities of a normal desktop computer and may require computer clusters or number crunchers (see section Parallelization).
Data preparation
Most of the required metadata files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops. These files are regularly updated.
Data that has to be gathered from other sources comprises:
The example scenario includes a small set of this data. The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats.
Prepare a station list file that contains the stations to be processed. Each line can contain more than one station. The first station in each line that has data available is used for the processing. If your network contains more than 60-70 stations, it is recommended to start processing with a core network (see Advanced). In this case, define an additional core station list file that can also have multiple stations per line.
Preprocessing: Orbit integration
Numerical integration of the satellite orbits is the first step in global GNSS processing. Dynamic orbits are integrated based on force models and then fitted to the approximate orbits by estimating their initial state and additional empirical parameters for solar radiation pressure to improve the orbit fit. The resulting variational equations file contains the integrated orbit, derivatives with respect to the satellite state vector, attitude, Earth rotation and satellite model.
Orbit preprocessing is covered by the script 020groopsGnssPreprocessing.xml in the example scenario.
It is recommended to perform the steps below in a loop over all satellites/PRNs using LoopPrograms. To get the relation between {prn} and {svn} set up an additional loop:platformEquipment inside loop:loop with
This second loop should perform only one step. The following programs are looped over all {prn}:
InstrumentResample: resample approximate orbits from data preparation to target sampling (e.g., 1 minute) by defining a timeSeries based on a method:polynomial (polynomialDegree=7, maxDataPointRange=7200, maxExtrapolationDistance=900).
OrbitAddVelocityAndAcceleration: add velocity via running polynomial (polynomialDegree=2) derivation (needed for attitude computation)
The script 030groopsGnssProcessing.xml in the example scenario implements the following steps and settings.
These are the settings for GnssProcessing. If not otherwise stated use the default values.
The first step is setting the processing sampling, in this example it is 30 seconds. The processing interval usually is a single 24-hour day, therefore define timeSeries:uniformSampling with timeStart=<mjd>, timeEnd=<mjd>+1, sampling=30/86400 (processing sampling).
Add the appropriate transmitters:gnss (e.g. GPS, GLONASS, and Galileo) and provide the required files:
Finally, define the processingSteps. This can be overwhelming at first, but offers a lot of flexibility. The example script uses a 5-minute processing sampling with subsequent clock densification to 30 seconds.
selectEpochs: with nthEpoch=10 to reduce sampling to 5 minutes.
selectParametrizations: disable constraint.STEC, *VTEC, *.tecBiases as the ionosphere parameters are estimated in the final steps only.
With some additional steps, the full 30-second sampling can be used to estimate all parameters (not only the clocks). These steps are disabled in the example script, as they require at least 16 GB of system memory. In this case, it is not necessary to separately write the 30-second clock files as listed above.
selectEpochs: with nthEpoch=1 to set full 30-second sampling
selectNormalsBlockStructure: As the system of normal equations can be very large, the memory consumption might be reduced with keepEpochNormalsInMemory=no. In this case the epoch parameters are directly eliminated during the accumulation and reconstructed in the solving step. This might lead to longer computation times.
estimate: with maxIterationCount=2: final iterations with full sampling
Advanced: Processing large station networks
Processing large station networks requires some additional steps to keep the computational load to a reasonable degree. The general processing strategy is to first process a well-distributed subset of stations (i.e. a core network) to get good estimates of all satellite parameters, which then enables integer ambiguity resolution (IAR). Once the ambiguities of the core network are resolved and stable estimates for satellite phase biases are available, all other (non-core) stations can be processed individually (including IAR) while keeping the satellite parameters fixed. At last, all stations can be processed together with all satellite parameters and ionosphere parameters.
'},
'cookbook.gnssPpp': { 'name': 'cookbook.gnssPpp', 'key': 'cookbook.gnssPpp', 'description': 'This cookbook chapter describes an example of GNSS precise point positioning (PPP) for a ground station using GPS, GLONASS, and Galileo. For information on how to generate the GNSS products (orbits, clocks, signal biases, etc.) required for PPP, see the cookbook GNSS satellite orbit determination and station network analysis . Scientific details about the underlying processing approach and the applied parametrizations, models, and corrections can be found in a doctoral thesis available under DOI . An example scenario for this task is available at https://ftp.tugraz.at/pub/ITSG/groops/scenario/scenarioGnssPPP.zip . It includes GROOPS scripts and data for the example, but not the general GROOPS data and metadata found at https://ftp.tugraz.at/pub/ITSG/groops (data folder or zipped archive). The scenario generally represents what is described in this cookbook, but may slightly differ in certain settings. Most of the required metadata files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops . These files are regularly updated. Data that has to be gathered from other sources comprises: Receiver observations : GNSS measurements converted from RINEX format (see RinexObservation2GnssReceiver ) Precise orbits : precise orbits in CRF for orbit integration (see Sp3Format2Orbit ) Precise clocks : precise clocks (see GnssClockRinex2InstrumentClock ) Attitude : rotation from body frame to CRF (see SimulateStarCameraGnss or GnssOrbex2StarCamera ) Signal biases : code (and phase) biases (see GnssSinexBias2SignalBias ) Receiver observations, precise satellite orbits and clocks, and possibly attitude and signal biases can be downloaded from the . GPS, GLONASS, and Galileo orbits, clocks, attitude, and signal biases for the period 1994-2020 are also available as part of . The includes a small set of this data. 
The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats. Prepare a station list file that contains the stations (one per line) to be processed. The script 02groopsGnssProcessing.xml in the implements the following steps and settings. These are the settings for GnssProcessing . If not otherwise stated use the default values. The first step is setting the processing sampling, in this example it is 30 seconds. The processing interval usually is a single 24-hour day, section with timeStart = <mjd> , timeEnd = <mjd>+1 , sampling = 30/86400 (processing sampling). Add the appropriate section (e.g. GPS, GLONASS, and Galileo) and provide the required files (from Data preparation ): section section section The following settings are needed in section : section : list of all stations to be processed section : The converted RINEX observation file. section : Use the settings described in receiver:stationNetwork . section : We recommend to explicitly specify the signals to be processed and to make sure that at least transmitter code biases are provided for each of them, e.g. C1CG , C1WG , C2WG , L1*G , L2*G , ...). section : Signals you might want to exclude are L5*G (GPS L5 phase due to time-variable bias on block IIF satellites), *3*R (GLONASS G3 freq.), *6*E (Galileo E6 freq.) Add the following section and define the outputfiles you are interested in inside each of them: section : add a constraint of sigmaSTEC = 40 section section : delete section section : provide section from Data preparation . section : if precise transmitter phase biases are available you can delete section section : delete section , set sigmaZeroMeanConstraint = 0 section : delete section , set sigmaZeroMeanConstraint = 0 section section : select section with the appropriate vmf3grid file. Add section with linear ( degree = 1 ) 2-hourly splines and section with linear daily splines. 
section : loose constraint troposphere estimates, section = troposphere* , sigma = 5 , and relativeToApriori = yes Add the following section : section : with maxIterationCount = 8 section section : with maxIterationCount = 2 section When processing multiple stations at the same time, moving section and section into the processing step section sets up and solves the normal equations independently for each station.', 'config_table': '', 'display_text': 'This cookbook chapter describes an example of GNSS precise point positioning (PPP) for a ground station using GPS, GLONASS, and Galileo. For information on how to generate the GNSS products (orbits, clocks, signal biases, etc.) required for PPP, see the cookbook GNSS satellite orbit determination and station network analysis.
Scientific details about the underlying processing approach and the applied parametrizations, models, and corrections can be found in a doctoral thesis available under DOI 10.3217/978-3-85125-885-1.
Receiver observations, precise satellite orbits and clocks, and possibly attitude and signal biases can be downloaded from the IGS Data Centers. GPS, GLONASS, and Galileo orbits, clocks, attitude, and signal biases for the period 1994-2020 are also available as part of Graz University of Technology\'s contribution to IGS repro3.
The example scenario includes a small set of this data. The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats.
Prepare a station list file that contains the stations (one per line) to be processed.
Processing of a ground station
The script 02groopsGnssProcessing.xml in the example scenario implements the following steps and settings.
These are the settings for GnssProcessing. If not otherwise stated use the default values.
The first step is setting the processing sampling, in this example it is 30 seconds. The processing interval usually is a single 24-hour day, therefore define timeSeries:uniformSampling with timeStart=<mjd>, timeEnd=<mjd>+1, sampling=30/86400 (processing sampling).
useType: We recommend to explicitly specify the signals to be processed and to make sure that at least transmitter code biases are provided for each of them (e.g. C1CG, C1WG, C2WG, L1*G, L2*G, ...).
excludeType: Signals you might want to exclude are L5*G (GPS L5 phase due to time-variable bias on block IIF satellites), *3*R (GLONASS G3 freq.), *6*E (Galileo E6 freq.)
Add the following parametrizations and define the outputfiles you are interested in inside each of them:
When processing multiple stations at the same time, moving estimate and resolveAmbiguities into the processing step forEachReceiverSeparately sets up and solves the normal equations independently for each station.
'},
'cookbook.kinematicOrbit': { 'name': 'cookbook.kinematicOrbit', 'key': 'cookbook.kinematicOrbit', 'description': 'This cookbook chapter describes exemplarily the steps for determining kinematic orbits of low-Earth orbit (LEO) satellites. An example scenario for this task is available at https://ftp.tugraz.at/pub/ITSG/groops/scenario/scenarioLeoKinematicOrbit.zip . It includes GROOPS scripts and data for the example, but not the general GROOPS data and metadata found at https://ftp.tugraz.at/pub/ITSG/groops (data folder or zipped archive). The scenario generally represents what is described in this cookbook, but may slightly differ in certain settings. Most of the required metadata files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops . These files are regularly updated. Data that has to be gathered from other sources comprises: Receiver observations : GNSS measurements converted from RINEX format (see RinexObservation2GnssReceiver ) Precise orbits : precise orbits in CRF for orbit integration (see Sp3Format2Orbit ) Precise clocks : precise clocks (see GnssClockRinex2InstrumentClock ) Attitude : rotation from body frame to CRF (see SimulateStarCameraGnss or GnssOrbex2StarCamera ) Signal biases : code (and phase) biases (see GnssSinexBias2SignalBias ) Receiver observations, precise satellite orbits and clocks, and possibly attitude and signal biases can be downloaded from the . GPS, GLONASS, and Galileo orbits, clocks, attitude, and signal biases for the period 1994-2020 are also available as part of . The includes a small set of this data. The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats. Metadata for several LEO missions is availabe at https://ftp.tugraz.at/pub/ITSG/groops/data/gnss/receiverLowEarthOrbiter . If you want to process another mission, you can create the necessary files with these steps: For creating the GnssSatelliteInfo file use PlatformCreate . 
Note that the rotation from the satellite reference frame into the antenna reference frame, as well as the change of the center of mass due to fuel consumption, has to be considered here. The GnssReceiverDefinition file can be created by using GnssReceiverDefinitionCreate . Here you can specify which GNSS signal types the receiver observes. For creating the GnssAntennaDefinition file use GnssAntennaDefinitionCreate . Here you can define phase center offsets for the antenna. For determining the elevation dependent accuracies the program GnssAntennaDefinitionCreate is used again. antenna : set to new Set the pattern s for code ( type = C** ) and phase ( type = L** ). The standard deviation is expressed e.g. with values = 0.001/cos(2*PI/180*zenith) . The includes a small set of this data for the GRACE-FO mission. The script 020groopsConvertGracefo.xml can be used to convert these external formats into GROOPS formats. The data preparation steps are: Conversion of the approximate orbit and star camera data into GROOPS format using a conversion program. If no attitude data is given the star camera data can be simulated by using SimulateStarCamera or SimulateStarCameraSentinel1 . The GNSS observation data (given in RINEX format) can be converted with RinexObservation2GnssReceiver . Suitable programs to get daily data are InstrumentConcatenate and InstrumentSynchronize . For interpolating the orbit and star camera data to GNSS receiver epochs use InstrumentResample and provide the converted RINEX observation file as input for section . For synchronizing these data use InstrumentSynchronize . Detailed description of instrument data handling can be found in Instrument data handling . The script 03groopsGnssProcessing.xml in the implements the following steps and settings. These are the settings for GnssProcessing . If not otherwise stated use the default values. As we have only one receiver the processing sampling can be directly taken from the observation file: section . 
Add the appropriate section (e.g. GPS) and provide the required files (from Prepare GNSS satellite data ): section section section The following files (from Prepare LEO metadata and Prepare LEO data ) and settings are needed in section : section : The satellite info file. section section section section : The converted RINEX observation file. section : The approximate orbit. section : The convered or simulated attitude. section : We recommend to explicitly specify the signals to be processed and to make sure that at least transmitter code biases are provided for each of them, e.g. C1CG , C1WG , C2WG , L1*G , L2*G , ...). Add the following section and define the outputfiles within you are interested in: section : add a constraint of sigmaSTEC = 40 section : set mapR = 6371e3+450e3 to satellite height and ionosphere height mapH = 50e3 above. section : delete section section : provide section created with GnssSinexBias2SignalBias section : if precise transmitter phase biases are available you can delete section section : delete section , set sigmaZeroMeanConstraint = 0 section : delete section , set sigmaZeroMeanConstraint = 0 section Add the following section : section : with maxIterationCount = 8 section section : with maxIterationCount = 2 section section', 'config_table': '', 'display_text': 'This cookbook chapter describes exemplarily the steps for determining kinematic orbits of low-Earth orbit (LEO) satellites.
Receiver observations, precise satellite orbits and clocks, and possibly attitude and signal biases can be downloaded from the IGS Data Centers. GPS, GLONASS, and Galileo orbits, clocks, attitude, and signal biases for the period 1994-2020 are also available as part of Graz University of Technology\'s contribution to IGS repro3.
The example scenario includes a small set of this data. The script 010groopsConvert.xml can be used to convert these external formats into GROOPS formats.
If you want to process another mission, you can create the necessary files with these steps:
For creating the GnssSatelliteInfo file use PlatformCreate. Note that the rotation from the satellite reference frame into the antenna reference frame, as well as the change of the center of mass due to fuel consumption, has to be considered here.
Set the patterns for code (type=C**) and phase (type=L**). The standard deviation is expressed e.g. with values=0.001/cos(2*PI/180*zenith).
Prepare LEO data
The example scenario includes a small set of this data for the GRACE-FO mission. The script 020groopsConvertGracefo.xml can be used to convert these external formats into GROOPS formats.
The data preparation steps are:
Conversion of the approximate orbit and star camera data into GROOPS format using a conversion program.
For interpolating the orbit and star camera data to GNSS receiver epochs use InstrumentResample and provide the converted RINEX observation file as input for timeSeries:instrument.
useType: We recommend to explicitly specify the signals to be processed and to make sure that at least transmitter code biases are provided for each of them (e.g. C1CG, C1WG, C2WG, L1*G, L2*G, ...).
Add the following parametrizations and define the outputfiles you are interested in:
'},
'cookbook.gravityFieldPod': { 'name': 'cookbook.gravityFieldPod', 'key': 'cookbook.gravityFieldPod', 'description': 'This cookbook chapter describes exemplarily the steps for determining the monthly gravity variations from precise orbit data (POD). Following data have to be prepared monthly with an adequate sampling, e.g. 10 s using InstrumentConcatenate : Precise (kinematic) orbit data 3x3 covariance matrices data Initial orbit data used for precise orbit determination Star camera data Accelerometer data Reduced sampling can be achieved by InstrumentReduceSampling . If the satellite mission does not provide any required accelerometer data, these data can be generated via SimulateAccelerometer . For satellite missions with less knowledge about the acting forces, it makes sense to consider more than one state vector within an orbit revolution. Otherwise the accuracy of the estimated parameters will decrease. This implies that shorter arcs are necessary. The assignment of the kinematic orbit data as well as the 3x3 covariance matrices data to the arcs can be done with InstrumentSynchronize . Gravityfield2SphericalHarmonicsVector converts the static respectively background gravity field into spherical harmonics. For determining the accuracies and weights of the kinematic orbits it is sufficient to make a least-square estimation with only certain parameters, due to the fact that some parameters do not influence the estimation of the accuracies and weights. This estimation is done with PreprocessingPod . Additionally, this program determines the temporal correlation of the kinematic orbit positions x, y and z. If short arcs are used the setting section shall be used. This setting considers the frictional forces by means of a macro model as well as the conservative and non-conservative forces. NormalsSolverVCE sets up the observation equations and summarized them to a normal equations system. The subsequent least-square estimation delivers the parameters surcharges. 
The estimated parameters result from the re-addition of the background field, which is done in MatrixCalculate . Gravityfield2PotentialCoefficients converts the gravity field parameters into spherical harmonics.', 'config_table': '', 'display_text': '
This cookbook chapter describes exemplarily the steps for determining the monthly gravity variations from precise orbit data (POD).
Step 1: Preparation of data
The following data have to be prepared monthly with an adequate sampling, e.g. 10 s, using InstrumentConcatenate:
Precise (kinematic) orbit data
3x3 covariance matrices data
Initial orbit data used for precise orbit determination
Star camera data
Accelerometer data
Reduced sampling can be achieved by InstrumentReduceSampling. If the satellite mission does not provide any required accelerometer data, these data can be generated via SimulateAccelerometer.
For satellite missions with less knowledge about the acting forces, it makes sense to consider more than one state vector within an orbit revolution. Otherwise the accuracy of the estimated parameters will decrease. This implies that shorter arcs are necessary. The assignment of the kinematic orbit data as well as the 3x3 covariance matrices data to the arcs can be done with InstrumentSynchronize.
Step 2: Conversion of the background gravity field
For determining the accuracies and weights of the kinematic orbits it is sufficient to make a least-squares estimation with only certain parameters, because some parameters do not influence the estimation of the accuracies and weights. This estimation is done with PreprocessingPod. Additionally, this program determines the temporal correlation of the kinematic orbit positions x, y and z. If short arcs are used the setting observation:podIntegral should be used. This setting considers the frictional forces by means of a macro model as well as the conservative and non-conservative forces.
Step 4: Solving of normal equations system
NormalsSolverVCE sets up the observation equations and accumulates them into a normal equations system. The subsequent least-squares estimation delivers the corrections to the parameters.
Step 5: Determination of the estimated gravity field parameters
The estimated parameters result from the re-addition of the background field, which is done in MatrixCalculate.
Step 6: Conversion of the gravity field parameters
'},
'cookbook.gravityFieldGrace': { 'name': 'cookbook.gravityFieldGrace', 'key': 'cookbook.gravityFieldGrace', 'description': 'This cookbook chapter describes an example of estimating a gravity field solution using GRACE observation data. For the respective month a set of spherical harmonic coefficients up to a maximum degree is determined. An example scenario for this task can be found at https://ftp.tugraz.at/pub/ITSG/groops/scenario/scenarioGraceGravityfieldRecovery.zip including the required GROOPS scripts and data sets for the gravity field recovery process. The provided scenario consists of a slightly simplified example as compared to the operational one. The background models are provided at https://ftp.tugraz.at/pub/ITSG/groops/data/ . The following background models were used during the data processing: Earth rotation : Moon, sun and planets ephemerides : Earth tide : Ocean tide : Pole tide : Ocean pole tide : Atmospheric tides : Atmosphere and Ocean Dealiasing : Sub-monthly continental hydrology : Relativistic corrections : These models were reduced during the analysis process and are not present in the solution. The model was used as the static gravity field as well as for the trend component and annual oscillation. In the script 000groopsBackgroundModels.xml a monthly mean of the GOCO06s including the time-variable components is determined in form of time splines using Gravityfield2TimeSplines . This model is later added back to the final gravity solution. The ITSG gravity field solutions are computed from the official GRACE L1B and GRACE-FO L1B observation data. The data sets for this example are provided in GROOPS file format in the scenario folder. 
The satellite-to-satellite-tracking (SST) data consists of: K-band range rates Light time correction Antenna offset corrections Additional observation data required for the processing comprises: Star camera observations Accelerometer data Approximate orbits Thruster data The determination of Kinematic orbits 3x3 epoch covariances is depicted in Kinematic orbit determination of LEO satellites . These data sets are also provided in the scenario folder. Data preparation is handled in the script 010groopsInstruments.xml . The approximate orbits (initial dynamic orbits) of the satellites, the star camera observations, the accelerometer data and the thruster data are resampled with a 5s sampling and small gaps in the data are filled using InstrumentResample . Gross outliers are removed using InstrumentRemoveEpochsByCriteria and the data is synchronized using InstrumentSynchronize . The approximate orbits are later used as a priori information for the dynamic orbit integration. In addition to the observed orientation of the spacecrafts (star camera observations), the nominal orientation is computed using SimulateStarCameraGrace . The difference between observed and simulated orientation is determined using InstrumentStarCameraMultiply and is employed in the outlier detection. The accelerometer data is initially calibrated by estimating a bias using InstrumentAccelerometerEstimateBiasScale with respect to simulated data created with SimulateAccelerometer . For simulating accelerometer data a satellite model implying the satellite\'s mass and surfaces is required. Such a model can be created with SatelliteModelCreate . Models for the GRACE and GRACE-FO satellites are also provided at https://ftp.tugraz.at/pub/ITSG/groops/data/satelliteModel/ . Non-gravitational forces comprising atmospheric drag, solar radiation pressure and albedo have to be modeled when simulating the accelerometer data. 
The acceleration bias parameters are determined as degree 3 time splines with 6h nodes. When determining these parameters the thruster events are excluded from the estimation. The SST observations, the light time corrections and the antenna center corrections are synchronized with a 5s sampling together with simulated SST data created with SimulateSatelliteTracking . Simulated data is used for the outlier detection of the original SST observations. The sampling of the kinematic orbits is reduced to 60s using InstrumentReduceSampling and an outlier detection is performed using the approximate dynamic orbits. The approximate orbits, the star camera observations and the accelerometer data are divided into 24h arcs (variational arcs). The kinematic orbits, its 3x3 epoch covariances, KBR observations, light time corrections, antenna center corrections and star camera observations are divided into 3h arcs per day (short arcs). Additionally the approximate orbits and the star camera observations are also synchronized to short arcs. Further information on instrument data preparation can be found in Instrument data handling . In this processing step dynamic orbits are computed for a complete 24h orbit arc by integrating the forces acting on the GRACE/GRACE-FO satellites. Additionally, the state transition matrix is set up. The dynamic orbits are then fitted to kinematic orbits and SST observations in a least squares adjustment by co-estimating additional accelerometer calibration parameters together with the initial state vector. The newly estimated parameters are then used to re-estimate the dynamic orbits and setting up the new state transition matrix. The script 020groopsVariational.xml in the scenario folder implements the required processing steps. Time splines from a time-variable gravity field are estimated using Gravityfield2TimeSplines . 
In this step the static gravity field (GOCO06s) is combined with the following time-variable components: section : static gravity field section section : trend component of gravity field section section : annual cosine component of gravity field section : annual sine component of gravity field section : atmosphere and ocean dealiasing (AOD1B RL06) section : ocean tides (FES2014b) section : atmospheric tides (TiME22) section : pole tides (IERS 2010) section : ocean pole tides (IERS 2010) maxDegree = 220 and sampling = 10/1440 is sufficient. In PreprocessingVariationalEquation the variational equations comprising the integrated orbit together with the state transition matrix are stored in section . This program has to be executed for both GRACE or GRACE-FO satellites and it is recommended to use LoopPrograms . section : satellite model from 020groopsInstruments.xml section : the approximate orbits from 020groopsInstruments.xml section : the attitude file from 020groopsInstruments.xml section : the accelerometer data from 020groopsInstruments.xml forces : see below section : JPL DE432 section : a static gravity field (GOCO06s) with maxDegree = 10 is more than sufficient. The section include: section : the previously estimated time-variable gravity field section : astronomical tides (based on JPL DE432 ephemerides) section : Earth tide (IERS conventions) section : relativistic effects (IERS conventions) In PreprocessingVariationalEquationOrbitFit the integrated orbit ( section ) is fitted to the kinematic orbit ( section ) by least squares adjustment. The additional accelerometer calibration parameters can be defined by section : accelerometer scale factor (once per day) section : accelerometer bias (time spline with 6h nodes) The observation equations (parameter sensitivity matrix) are computed by integration of the variational equations ( section ) using a polynomial with integrationDegree = 7 . 
PreprocessingVariationalEquationOrbitFit has to be executed per satellite. PreprocessingVariationalEquationSstFit fits two dynamic orbits section to the SST observations and the kinematic orbits. rightHandSide : input for observation vectors section : K-band range rate observations section : light time correction section : antenna offset corrections section : kinematic orbit of satellite 1 section : kinematic orbit of satellite 2 sstType : rangeRate section : dynamic orbit and integrated state matrix of satellite 1 section : dynamic orbit and integrated state matrix of satellite 2 section : same as in In PreprocessingVariationalEquationOrbitFit section : same as in In PreprocessingVariationalEquationOrbitFit integrationDegree : 7 interpolationDegree : 7 section sigma : 1 section sigma : 1 section : 3x3 epoch covariances section sigma : 1 section : 3x3 epoch covariances The estimated accelerometer calibration parameters from PreprocessingVariationalEquationOrbitFit and PreprocessingVariationalEquationSstFit are determined as corrections and stored in section . Both correction estimates have to be summed up using FunctionsCalculate . The dynamic orbit and the resulting accelerometer calibration parameters are now used to re-integrate the orbit once more using PreprocessingVariationalEquation and introducing section as estimatedParameters . This step usually ensures convergence. If the maximum orbit difference is still not sufficient this step can be repeated again. The script 030groopsPreprocessing.xml implements the following steps and settings. The program PreprocessingSst processes SST observations and kinematic orbit data, and performs a complete least squares adjustment for gravity field determination by computing the observations equations. It also allows for an iterative refinement of the stochastic model of the observations along with arc-wise variance factors through variance component estimation (VCE). 
Force model parameters (gravitational potential coefficients and accelerometer calibration parameters) are computed by integrating the parameter sensitivity matrix from the variational equations. Parameters describing effects due to the SST observation system and geometry (KBR antenna phase center variations) are computed using the dynamic orbits as a Taylor point. Short time gravity variations can be co-estimated together with the monthly mean gravity field. The autoregressive model sequence constraining the short time parameters is provided in the data folder. It is precomputed from hydrology and nontidal atmospheric and ocean background models. See for more information about this co-estimation. section : sstVariational rightHandSide : section : KBR range rates section : light time correction section : antenna offset corrections section : kinematic orbit of satellite 1 section : kinematic orbit of satellite 2 sstType : rangeRate section : dynamic orbit and integrated state matrix of satellite 1 section : dynamic orbit and integrated state matrix of satellite 2 section : JPL DE432 section : spherical harmonics from minDegree = 2 to maxDegree = 60 section : high frequency parametrization section : same as in In PreprocessingVariationalEquationOrbitFit section : same as in In PreprocessingVariationalEquationOrbitFit section : antenna phase center variations (y and z for both satellites) integrationDegree : 7 interpolationDegree : 7 section sigma : 1e-7 sampling : 5 [seconds] section sigma : 2 section : 3x3 epoch covariances sampling : 60 [seconds] estimateShortTimeVariations section : AR models section : names section : parametrizationGravity section : high frequency parametrization ParameterSelection2IndexVector and MatrixCalculate with section can be used to extract the desired spherical harmonic coefficients from section and the respective standard deviations from section up to a certain degree. 
In the program Gravityfield2PotentialCoefficients the estimated spherical harmonics coefficients are read with section . The monthly mean gravity field can be added back by additionally selecting the time splines created in 000groopsBackgroundModels.xml using section . The preprocessing solution is saved as a spherical harmonics file . Normal equations are set up in the script 040groopsMonthlyNormals120.xml using the program NormalsBuildShortTimeStaticLongTime . The time intervals which the normal equations are divided into are defined in section . The normal equations are based on section including the SST data, the kinematic orbits and the variational equations. The parametrization of the gravity field can be set with section (e.g. spherical harmonics up to degree and order 120). Accelerometer calibration parameters and KBR antenna phase center variations can be parameterized using section and section . With estimateShortTimeVariations short time variations of the gravity field can be co-estimated. The parameters selected by section (e.g. linear splines with 6h nodes) are constrained by an section . Additional temporal variations (e.g. trend and annual oscillation) could be estimated with estimateLongTimeVariations . The desired spherical harmonic coefficients are determined in the script 050groopsMonthlySolve.xml . NormalsSolverVCE accumulates section and solves the total combined system. Variance component estimation is used to determine the relative weighting of the individual normals, i.e. the arc-wise variances. The previously computed stochastic model of the observations remains unchanged. The estimated parameter vector ( section ), the estimated accuracies ( section ) and the full covariance matrix ( section ) can be saved. 
Using Gravityfield2PotentialCoefficients the final solution can be saved as a spherical harmonics file by adding back the monthly mean gravity field to the estimated spherical harmonic coefficients.', 'config_table': '', 'display_text': 'This cookbook chapter describes an example of estimating a gravity field solution using GRACE observation data. For the respective month a set of spherical harmonic coefficients up to a maximum degree is determined. An example scenario for this task can be found at https://ftp.tugraz.at/pub/ITSG/groops/scenario/scenarioGraceGravityfieldRecovery.zip including the required GROOPS scripts and data sets for the gravity field recovery process. The provided scenario consists of a slightly simplified example as compared to the operational one. The background models are provided at https://ftp.tugraz.at/pub/ITSG/groops/data/.
Background models
The following background models were used during the data processing:
These models were reduced during the analysis process and are not present in the solution. The GOCO06s model was used as the static gravity field as well as for the trend component and annual oscillation. In the script 000groopsBackgroundModels.xml a monthly mean of the GOCO06s including the time-variable components is determined in form of time splines using Gravityfield2TimeSplines. This model is later added back to the final gravity solution.
Instrument data preparation
The ITSG gravity field solutions are computed from the official GRACE L1B JPL (2018) and GRACE-FO L1B JPL (2019) observation data. The data sets for this example are provided in GROOPS file format in the scenario folder.
The satellite-to-satellite-tracking (SST) data consists of:
K-band range rates
Light time correction
Antenna offset corrections
Additional observation data required for the processing comprises:
Data preparation is handled in the script 010groopsInstruments.xml. The approximate orbits (initial dynamic orbits) of the satellites, the star camera observations, the accelerometer data and the thruster data are resampled with a 5s sampling and small gaps in the data are filled using InstrumentResample. Gross outliers are removed using InstrumentRemoveEpochsByCriteria and the data is synchronized using InstrumentSynchronize.
The approximate orbits are later used as a priori information for the dynamic orbit integration. In addition to the observed orientation of the spacecrafts (star camera observations), the nominal orientation is computed using SimulateStarCameraGrace. The difference between observed and simulated orientation is determined using InstrumentStarCameraMultiply and is employed in the outlier detection.
The accelerometer data is initially calibrated by estimating a bias using InstrumentAccelerometerEstimateBiasScale with respect to simulated data created with SimulateAccelerometer. For simulating accelerometer data a satellite model implying the satellite\'s mass and surfaces is required. Such a model can be created with SatelliteModelCreate. Models for the GRACE and GRACE-FO satellites are also provided at https://ftp.tugraz.at/pub/ITSG/groops/data/satelliteModel/. Non-gravitational forces comprising atmospheric drag, solar radiation pressure and albedo have to be modeled when simulating the accelerometer data. The acceleration bias parameters are determined as degree 3 time splines with 6h nodes. When determining these parameters the thruster events are excluded from the estimation.
The SST observations, the light time corrections and the antenna center corrections are synchronized with a 5s sampling together with simulated SST data created with SimulateSatelliteTracking. Simulated data is used for the outlier detection of the original SST observations.
The sampling of the kinematic orbits is reduced to 60s using InstrumentReduceSampling and an outlier detection is performed using the approximate dynamic orbits.
The approximate orbits, the star camera observations and the accelerometer data are divided into 24h arcs (variational arcs). The kinematic orbits, their 3x3 epoch covariances, KBR observations, light time corrections, antenna center corrections and star camera observations are divided into 3h arcs per day (short arcs). Additionally, the approximate orbits and the star camera observations are also synchronized to short arcs.
In this processing step dynamic orbits are computed for a complete 24h orbit arc by integrating the forces acting on the GRACE/GRACE-FO satellites. Additionally, the state transition matrix is set up. The dynamic orbits are then fitted to kinematic orbits and SST observations in a least squares adjustment by co-estimating additional accelerometer calibration parameters together with the initial state vector. The newly estimated parameters are then used to re-estimate the dynamic orbits and setting up the new state transition matrix.
The script 020groopsVariational.xml in the scenario folder implements the required processing steps. Time splines from a time-variable gravity field are estimated using Gravityfield2TimeSplines. In this step the static gravity field (GOCO06s) is combined with the following time-variable components:
The observation equations (parameter sensitivity matrix) are computed by integration of the variational equations (inputfileVariational) using a polynomial with integrationDegree=7. PreprocessingVariationalEquationOrbitFit has to be executed per satellite.
The dynamic orbit and the resulting accelerometer calibration parameters are now used to re-integrate the orbit once more using PreprocessingVariationalEquation and introducing parametrizationAcceleration as estimatedParameters. This step usually ensures convergence. If the maximum orbit difference is still not sufficient this step can be repeated again.
Preprocessing
The script 030groopsPreprocessing.xml implements the following steps and settings. The program PreprocessingSst processes SST observations and kinematic orbit data, and performs a complete least squares adjustment for gravity field determination by computing the observation equations. It also allows for an iterative refinement of the stochastic model of the observations along with arc-wise variance factors through variance component estimation (VCE). Force model parameters (gravitational potential coefficients and accelerometer calibration parameters) are computed by integrating the parameter sensitivity matrix from the variational equations. Parameters describing effects due to the SST observation system and geometry (KBR antenna phase center variations) are computed using the dynamic orbits as a Taylor point. Short time gravity variations can be co-estimated together with the monthly mean gravity field. The autoregressive model sequence constraining the short time parameters is provided in the data folder. It is precomputed from hydrology and nontidal atmospheric and ocean background models. See Kvas 2019 for more information about this co-estimation.
Normal equations are set up in the script 040groopsMonthlyNormals120.xml using the program NormalsBuildShortTimeStaticLongTime. The time intervals which the normal equations are divided into are defined in inputfileArcList. The normal equations are based on observation including the SST data, the kinematic orbits and the variational equations. The parametrization of the gravity field can be set with observation:parametrizationGravity (e.g. spherical harmonics up to degree and order 120). Accelerometer calibration parameters and KBR antenna phase center variations can be parameterized using parametrizationAcceleration and parametrizationSst. With estimateShortTimeVariations short time variations of the gravity field can be co-estimated. The parameters selected by parameterSelection (e.g. linear splines with 6h nodes) are constrained by an autoregressiveModelSequence. Additional temporal variations (e.g. trend and annual oscillation) could be estimated with estimateLongTimeVariations.
Solving normal equations
The desired spherical harmonic coefficients are determined in the script 050groopsMonthlySolve.xml. NormalsSolverVCE accumulates normalEquation and solves the total combined system. Variance component estimation is used to determine the relative weighting of the individual normals, i.e. the arc-wise variances. The previously computed stochastic model of the observations remains unchanged. The estimated parameter vector (outputfileSolution), the estimated accuracies (outputfileSigmax) and the full covariance matrix (outputfileCovariance) can be saved. Using Gravityfield2PotentialCoefficients the final solution can be saved as a spherical harmonics file by adding back the monthly mean gravity field to the estimated spherical harmonic coefficients.
'},
'cookbook.regionalGeoid': { 'name': 'cookbook.regionalGeoid', 'key': 'cookbook.regionalGeoid', 'description': 'This shows the exemplary computation of a regional geoid using terrestrial gravimetric observations in combination with a global satellite model such as GOCO06s. The geoid is estimated in a least squares adjustment with a parametrization using radial basis functions. A detailed description of the method is given in Christian Pock (2017), Consistent Combination of Satellite and Terrestrial Gravity Field Observations in Regional Geoid Modeling. Dissertation TU Graz. Here it is assumed that the measured absolute gravity data is given at points in ellipsoidal coordinates. The observed values should be converted to SI units . Matrix2GriddedData to convert data from text file in tabular form. A high resolution topography model is needed to reduce the observations. As the model heights are usually given in physical heights a reference geoid is needed to compute the correct ellipsoidal height. NetCdf2GridRectangular convert into groops format. Gravityfield2GriddedData : Compute geoid heights using the GOCO06s model with section The topography grid section section the GOCO06s model section Subtract ( factor =-1) GRS80 normal field. GriddedDataCalculate : Generate a new combined griddedData file with the orthometric height ( data0 ) and the geoid height ( data1 ). GriddedTopography2PotentialCoefficients : Compute the gravitational potential in terms of spherical harmonics up to a maximum degree of the global satellite model. This is the part of the topography, which is already included in the global satellite model. The integration boundaries are radialUpperBound = data0+data1 and radialLowerBound = data1 . Calculate approximate reference gravity to reduce it from the observations. 
Gravityfield2AbsoluteGravity section at observation positions section Centrifugal potential section full GOCO06s model section ( radialUpperBound = data0+data1 , radialLowerBound = data1 ) section Subtract ( factor =-1) the potential part of the topography already included in the GOCO06s model. GriddedDataCalculate to calculate observed minus computed. Large outliers can be removed in GriddedDataCalculate with removalCriteria . The residual gravity is parametrized in terms of Radial Basis Functions section . The basis functions should be distributed on a regular section covering a somewhat larger area than the observations, see section . The shape of the functions section should reflect the signal content of reduced observations and are defined by the coefficients. RadialBasisSplines2KernelCoefficients section accuracies of GOCO06s model maxDegree =7000. Complemented by Kaula\'s rule of thumb The maximum degree should correspond to the spatial resolution. Rule of thumb: the number of spherical harmonic coefficients should roughly agree to the number of grid points if they would cover the complete Earth. Setup the observation equations and accumulate the system of normal equations. NormalsBuild section with section section section with section NormalsSolverVCE section from NormalsBuild section towards zero means regularization towards the GOCO06s, which is reduced from the data. Evaluate the estimated parameters and add back the reduced reference models. Gravityfield2GriddedData : Compute approximate geoid heights using the GOCO06s model with section select a grid with target resolution at elliposid section section GOCO06s model section Subtract ( factor =-1) GRS80 normal field. GriddedDataCalculate : Move points from ellipsoid to geoid with height = data0 Gravityfield2GriddedData section from above. 
section section The solution vector together with the RBF parametrization section GOCO06s model section ( radialUpperBound = data0+data1 , radialLowerBound = data1 ) section Subtract ( factor =-1) the potential part of the topography already included in the GOCO06s model. section Subtract ( factor =-1) GRS80 normal field. convertToHarmonics = no , otherwise the RBF are converted to harmonics up to degree 7000. GriddedDataCalculate : Set height =0 of the computed geoid grid. GridRectangular2NetCdf', 'config_table': '', 'display_text': 'This shows the exemplary computation of a regional geoid using terrestrial gravimetric observations in combination with a global satellite model such as GOCO06s. The geoid is estimated in a least squares adjustment with a parametrization using radial basis functions. A detailed description of the method is given in Christian Pock (2017), Consistent Combination of Satellite and Terrestrial Gravity Field Observations in Regional Geoid Modeling. Dissertation TU Graz.
Gravimetric data
Here it is assumed that the measured absolute gravity data is given at points in ellipsoidal coordinates. The observed values should be converted to SI units $m/s^2$.
A high resolution topography model is needed to reduce the observations. As the model heights are usually given in physical heights a reference geoid is needed to compute the correct ellipsoidal height.
GriddedDataCalculate: Generate a new combined griddedData file with the orthometric height (data0) and the geoid height (data1).
GriddedTopography2PotentialCoefficients: Compute the gravitational potential in terms of spherical harmonics up to a maximum degree of the global satellite model. This is the part of the topography, which is already included in the global satellite model. The integration boundaries are radialUpperBound=data0+data1 and radialLowerBound=data1.
Reduce
Calculate approximate reference gravity to reduce it from the observations.
The residual gravity is parametrized in terms of Radial Basis Functions parametrizationGravity:radialBasis. The basis functions should be distributed on a regular grid covering a somewhat larger area than the observations, see border. The shape of the functions kernel:coefficients should reflect the signal content of reduced observations and are defined by the coefficients.
maxDegree=7000. Complemented by Kaula\'s rule of thumb
The maximum degree should correspond to the spatial resolution. Rule of thumb: the number of spherical harmonic coefficients $(\\text{maxDegree}+1)^2$ should roughly agree to the number of grid points if they would cover the complete Earth.
Compute: Estimate parameters in a least squares adjustment
Setup the observation equations and accumulate the system of normal equations.
'},
'AutoregressiveModel2CovarianceMatrix': { 'name': 'AutoregressiveModel2CovarianceMatrix', 'key': 'AutoregressiveModel2CovarianceMatrix', 'description': 'This program computes the covariance structure of a random process represented by an AR model sequence. The covariance matrix is determined by accumulating the normal equations of all AR models in autoregressiveModelSequence and inverting the combined normal equation matrix. For each output file in This program computes the covariance structure of a random process represented by an AR model sequence. The covariance matrix is determined by accumulating the normal equations of all AR models in , the covariance matrix of appropriate time lag is saved (the first file contains the auto-covariance, second file cross covariance and so on). The matrix for lag describes the covariance between and , i.e. .', 'config_table': 'outputfileCovarianceMatrix filename covariance matrix for each lag autoregressiveModelSequence autoregressiveModelSequenceType AR model sequence', 'display_text': 'This program computes the covariance structure of a random process represented by an AR model sequence. The covariance matrix is determined by accumulating the normal equations of all AR models in autoregressiveModelSequence and inverting the combined normal equation matrix. For each output file in outputfileCovarianceMatrix, the covariance matrix of appropriate time lag is saved (the first file contains the auto-covariance, second file cross covariance and so on). The matrix for lag $h$ describes the covariance between $x_{t-h}$ and $x_{t}$, i.e. $\\Sigma(t-h, t)$.'},
'CovarianceFunction2DigitalFilter': { 'name': 'CovarianceFunction2DigitalFilter', 'key': 'CovarianceFunction2DigitalFilter', 'description': 'Computes digital filter coefficients for a Computes digital filter coefficients for a of given degree and order. The filter coefficients are computed by fitting them to an approximated impulse response represented by the cholesky factor of the covariance matrix. The parameter warmup determines from which element of the cholesky matrix the coefficients (default: half the covariance length) are fitted. Per default, the program computes filter coefficients which generate colored noise when applied to a white noise sequence. When decorrelationFilter is set, a decorrelation filter is computed which yields white noise when applied to colored noise.', 'config_table': 'outputfileFilter filename filter coefficients inputfileCovariance filename first column: time steps, following columns: covariance functions column uint Column with covariance function to be fitted warmup uint number of samples until diagonal of Cholesky factor is flat (default: half covariance length) numeratorDegree uint Maximum degree of numerator polynomial (MA constituent) denominatorDegree uint Maximum degree of denominator polynomial (AR constituent) decorrelationFilter boolean compute a decorrelation filter', 'display_text': 'Computes digital filter coefficients for a digital filter of given degree and order. The filter coefficients are computed by fitting them to an approximated impulse response represented by the cholesky factor of the covariance matrix.
The parameter warmup determines from which element of the cholesky matrix the coefficients (default: half the covariance length) are fitted.
Per default, the program computes filter coefficients which generate colored noise when applied to a white noise sequence. When decorrelationFilter is set, a decorrelation filter is computed which yields white noise when applied to colored noise.'},
'CovarianceFunction2PowerSpectralDensity': { 'name': 'CovarianceFunction2PowerSpectralDensity', 'key': 'CovarianceFunction2PowerSpectralDensity', 'description': 'One sided Power Spectral Density (PSD) from a covariance function. The first column of One sided Power Spectral Density (PSD) from a covariance function. The first column of should contain the time lag in seconds. Multiple covariance functions (in the following column)s are supported. The output is a matrix with first column contains the frequency and the other columns the PSD . Conversion between covariance function and PSD is performed by discrete cosine transformation: See also PowerSpectralDensity2CovarianceFunction .', 'config_table': 'outputfilePSD filename first column: frequency [Hz], other columns PSD [unit^2/Hz] inputfileCovarianceFunction filename first column: time steps, following columns: covariance functions', 'display_text': 'One sided Power Spectral Density (PSD) from a covariance function. The first column of inputfileCovarianceFunction should contain the time lag in seconds. Multiple covariance functions (in the following column)s are supported. The output is a matrix with first column contains the frequency $[Hz]$ and the other columns the PSD $[unit^2/Hz]$.
Conversion between covariance function $c_j$ and PSD $p_k$ is performed by discrete cosine transformation: \\[ p_k = 2\\Delta t\\left(c_0 + c_{n-1} (-1)^k + \\sum_{j=1}^{n-2} 2 c_j \\cos(\\pi jk/(n-1))\\right). \\] See also PowerSpectralDensity2CovarianceFunction.'},
'CovarianceMatrix2AutoregressiveModel': { 'name': 'CovarianceMatrix2AutoregressiveModel', 'key': 'CovarianceMatrix2AutoregressiveModel', 'description': 'This program computes a VAR(p) model from empirical covariance matrices. The This program computes a VAR(p) model from empirical covariance matrices. The represent the covariance structure of the process: the first file should contain the auto-covariance, the second the cross-covariance of lag one, the next cross-covariance of lag two and so on. Cross-covariance matrices are defined as the cross-covariance between epoch and . If the process realizations are arranged by ascending time stamps ( ), the covariance structure of the (stationary) process is therefore given by The estimated AR model is saved as a single matrix outputfileAutoregressiveModel according to the GROOPS AR model conventions.', 'config_table': 'outputfileAutoregressiveModel filename coefficients and white noise covariance of AR(p) model inputfileCovarianceMatrix filename file name of covariance matrix', 'display_text': 'This program computes a VAR(p) model from empirical covariance matrices. The inputfileCovarianceMatrix represent the covariance structure of the process: the first file should contain the auto-covariance, the second the cross-covariance of lag one, the next cross-covariance of lag two and so on.
Cross-covariance matrices $\\Sigma_{\\Delta_k}$ are defined as the cross-covariance between epoch $t-k$ and $t$. If the process realizations $x_{t}$ are arranged by ascending time stamps ($\\{\\dots, x_{t-2}, x_{t-1}, x_{t}, x_{t+1}, x_{t+2},\\dots\\}$), the covariance structure of the (stationary) process is therefore given by \\[ \\begin{bmatrix} \\Sigma & \\Sigma_{\\Delta_1} & \\Sigma_{\\Delta_2} & \\cdots \\\\ \\Sigma_{\\Delta_1}^T & \\Sigma & \\Sigma_{\\Delta_1} & \\cdots \\\\ \\Sigma_{\\Delta_2}^T & \\Sigma_{\\Delta_1}^T & \\Sigma & \\cdots \\\\ \\vdots & \\vdots & \\vdots & \\ddots \\\\ \\end{bmatrix}. \\] The estimated AR model is saved as a single matrix outputfileAutoregressiveModel according to the GROOPS AR model conventions.'},
'CovarianceMatrix2Correlation': { 'name': 'CovarianceMatrix2Correlation', 'key': 'CovarianceMatrix2Correlation', 'description': 'This program computes the pearson correlation coefficient from a given covariance matrix stored in This program computes the pearson correlation coefficient . The result is stored in This program computes the pearson correlation coefficient .', 'config_table': 'outputfileCorrelationMatrix filename correlation matrix inputfileCovarianceMatrix filename covariance matrix', 'display_text': 'This program computes the pearson correlation coefficient \\[ \\rho_{ij} = \\frac{\\sigma_{ij}}{\\sigma_i \\sigma_j} \\]from a given covariance matrix stored in inputfileCovarianceMatrix. The result is stored in outputfileCorrelationMatrix.'},
'PowerSpectralDensity2CovarianceFunction': { 'name': 'PowerSpectralDensity2CovarianceFunction', 'key': 'PowerSpectralDensity2CovarianceFunction', 'description': 'Covariance function from Power Spectral Density (PSD). The Covariance function from Power Spectral Density (PSD). The contains in the first column the frequency , followed by (possibly multiple) PSDs . The output is a matrix , the first column containing time lag and the other columns the covariance functions . Conversion between PSD and covariance function is performed by discrete cosine transformation: See also CovarianceFunction2PowerSpectralDensity .', 'config_table': 'outputfileCovarianceFunction filename first column: time steps [seconds], following columns: covariance functions inputfilePSD filename first column: frequency [Hz], following columns PSD [unit^2/Hz]', 'display_text': 'Covariance function from Power Spectral Density (PSD). The inputfilePSD contains in the first column the frequency $[Hz]$, followed by (possibly multiple) PSDs $[unit^2/Hz]$. The output is a matrix, the first column containing time lag $[s]$ and the other columns the covariance functions $[unit^2]$. Conversion between PSD $p_j$ and covariance function $c_k$ is performed by discrete cosine transformation: \\[ c_k = \\frac{1}{4\\Delta t (n-1)}\\left(p_0 + p_{n-1} (-1)^k + \\sum_{j=1}^{n-2} 2 p_j \\cos(\\pi jk/(n-1))\\right). \\] See also CovarianceFunction2PowerSpectralDensity.'},
'DoodsonAdmittanceInterpolation': { 'name': 'DoodsonAdmittanceInterpolation', 'key': 'DoodsonAdmittanceInterpolation', 'description': 'To visualize the interpolation of the minor tides. The output is a matrix with the first column containing the tidal frequency, the second column is the tide generating amplitude (from To visualize the interpolation of the minor tides. The output is a ), and the following columns the contribution of the major tides to the this tidal frequency as defined in in To visualize the interpolation of the minor tides. The output is a .', 'config_table': 'outputfile filename inputfileAdmittance filename interpolation of minor constituents inputfileTideGeneratingPotential filename', 'display_text': 'To visualize the interpolation of the minor tides. The output is a matrix with the first column containing the tidal frequency, the second column is the tide generating amplitude (from inputfileTideGeneratingPotential), and the following columns the contribution of the major tides to the this tidal frequency as defined in in inputfileAdmittance.
'},
'DoodsonAdmittanceTimeSeries': { 'name': 'DoodsonAdmittanceTimeSeries', 'key': 'DoodsonAdmittanceTimeSeries', 'description': 'To visualize the interpolation of the minor tides it computes cosine multipliers of all major tides. Without admittance this would be a simple cos oscillation. The outputfileTimeSeries is an instrument file (MISCVALUES) containing the cos of all the major tides.', 'config_table': 'outputfileTimeSeries filename MISCVALUES (cos of major tides, ...) inputfileAdmittance filename cos/sin multipliers of the major tides timeSeries timeSeriesType', 'display_text': 'To visualize the interpolation of the minor tides it computes cosine multipliers of all major tides. Without admittance this would be a simple cos oscillation. The outputfileTimeSeries is an instrument file (MISCVALUES) containing the cos of all the major tides.
'},
'DoodsonArguments2TimeSeries': { 'name': 'DoodsonArguments2TimeSeries', 'key': 'DoodsonArguments2TimeSeries', 'description': 'Time series of doodson/fundamental arguments. The Time series of doodson/fundamental arguments. The contains the six Doodson arguments, followed by the five fundamental arguments in radians.', 'config_table': 'outputfileTimeSeries filename each epoch: 6 doodson args, 5 fundamental args [rad] timeSeries timeSeriesType', 'display_text': 'Time series of doodson/fundamental arguments. The outputfileTimeSeries contains the six Doodson arguments, followed by the five fundamental arguments in radians.'},
'DoodsonHarmonics2GriddedAmplitudeAndPhase': { 'name': 'DoodsonHarmonics2GriddedAmplitudeAndPhase', 'key': 'DoodsonHarmonics2GriddedAmplitudeAndPhase', 'description': 'This program reads a This program reads a and evaluates a single tidal constituent selected by dooddson (Doodson number or Darwin´s name, e.g. 255.555 or M2). This program computes the amplitude and phase from the cos and sin coefficients on a given This program reads a . The type of functional (e.g gravity anomalies or geoid heights) can be choosen with This program reads a . The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening . To visualize the results use PlotMap .', 'config_table': 'outputfileGrid filename ampl, phase [-pi,pi], cos, sin inputfileDoodsonHarmonics filename doodson doodson tidal constituent filter sphericalHarmonicsFilterType grid gridType kernel kernelType minDegree uint maxDegree uint factor double the values on grid are multiplied by this factor R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program reads a inputfileDoodsonHarmonics and evaluates a single tidal constituent selected by dooddson (Doodson number or Darwin´s name, e.g. 255.555 or M2). This program computes the amplitude and phase from the cos and sin coefficients on a given grid. The type of functional (e.g gravity anomalies or geoid heights) can be choosen with kernel. The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening. To visualize the results use PlotMap.
'},
'DoodsonHarmonics2PotentialCoefficients': { 'name': 'DoodsonHarmonics2PotentialCoefficients', 'key': 'DoodsonHarmonics2PotentialCoefficients', 'description': 'The The contains a Fourier series of a time variable gravitational potential at specific tidal frequencies (tides) where and are spherical harmonics expansions. If set the expansions are limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM . The The is not a single file but a series of files. For each spherical harmonics expansion and a separate file is created where the variables variableLoopName , variableLoopDoodson , variableLoopCosSin are set accordingly. The file name should contain these variables, e.g. coeff.{name}.{doodson}.{cossin}.gfc . If applyXi the Doodson-Warburg phase correction (see IERS conventions) is applied to the cos/sin potentialCoefficients before.', 'config_table': 'outputfilePotentialCoefficients filename variableLoopName string variable with darwins\'s name of each constituent variableLoopDoodson string variable with doodson code of each constituent variableLoopCosSin string variable with \'cos\' or \'sin\' of each constituent variableLoopIndex string variable with index of each constituent (starts with zero) variableLoopCount string variable with total number of constituents inputfileDoodsonHarmonics filename inputfileTideGeneratingPotential filename to compute Xi phase correction minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius applyXi boolean apply Doodson-Warburg phase correction (see IERS conventions)', 'display_text': 'The inputfileDoodsonHarmonics contains a Fourier series of a time variable gravitational potential at specific tidal frequencies (tides) \\[ V(\\M x,t) = \\sum_{f} V_f^c(\\M x)\\cos(\\theta_f(t)) + V_f^s(\\M x)\\sin(\\theta_f(t)), \\]where $V_f^c(\\M x)$ and $V_f^s(\\M x)$ are spherical harmonics 
expansions. If set, the expansions are limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
The outputfilePotentialCoefficients is not a single file but a series of files. For each spherical harmonics expansion $V_f^c(\\M x)$ and $V_f^s(\\M x)$ a separate file is created where the variables variableLoopName, variableLoopDoodson, variableLoopCosSin are set accordingly. The file name should contain these variables, e.g. coeff.{name}.{doodson}.{cossin}.gfc.
If applyXi is set, the Doodson-Warburg phase correction (see IERS conventions) is applied to the cos/sin potentialCoefficients beforehand.'},
'DoodsonHarmonicsCalculateAdmittance': { 'name': 'DoodsonHarmonicsCalculateAdmittance', 'key': 'DoodsonHarmonicsCalculateAdmittance', 'description': 'Computes the admittance function to interpolate minor tides from tides given in Computes the admittance function to interpolate minor tides from tides given in using Computes the admittance function to interpolate minor tides from tides given in .', 'config_table': 'outputfileAdmittance filename inputfileDoodsonHarmonics filename inputfileTideGeneratingPotential filename TGP threshold double [m^2/s^2] only interpolate tides with TGP greater than threshold degreeInterpolation uint polynomial degree for interpolation degreeExtrapolation uint polynomial degree for extrapolation excludeDoodsonForInterpolation doodson major tides not used for interpolation', 'display_text': 'Computes the admittance function to interpolate minor tides from tides given in inputfileDoodsonHarmonics using inputfileTideGeneratingPotential.'},
'DoodsonHarmonicsChangePartialTides': { 'name': 'DoodsonHarmonicsChangePartialTides', 'key': 'DoodsonHarmonicsChangePartialTides', 'description': 'Reads a file Reads a file and write it to Reads a file . If set the spherical harmonics expansion is limited in the range between minDegree and maxDegree inclusivly. The Reads a file and Reads a file can be used to filter the partial types that will be exported. Additional partial tides can be interpolated using the file Reads a file .', 'config_table': 'outputfileDoodsonHarmonics filename inputfileDoodsonHarmonics filename inputfileAdmittance filename interpolation of minor constituents useDoodson doodson use only these partial tides (additional tides will be interpolated) ignoreDoodson doodson ignore these partial tides minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'Reads a file inputfileDoodsonHarmonic and write it to outputfileDoodsonHarmonics. If set the spherical harmonics expansion is limited in the range between minDegree and maxDegree inclusivly. The useDoodson and ignoreDoodson can be used to filter the partial types that will be exported. Additional partial tides can be interpolated using the file inputfileAdmittance.'},
'ModelEquilibriumTide': { 'name': 'ModelEquilibriumTide', 'key': 'ModelEquilibriumTide', 'description': 'Computes the equilibrium ocean tide of the long periodic tideGeneratingPotential . The spherical harmonics expansion up to maxDegree with GM and R is estimated using a least squares adjustment. The Computes the equilibrium ocean tide of the long periodic must be a global regular grid with the vertically averaged seawater density over the ocean and zero over land. It takes iteratively self attraction and loading into account using the Love numbers Computes the equilibrium ocean tide of the long periodic and Computes the equilibrium ocean tide of the long periodic . Additionally the effects of the solid Earth tide are considered, both the gravitational (Love numbers k20 , k20plus ) and the geometrical (Love numbers h20,0 , h20,2 ) effect. See also PotentialCoefficients2DoodsonHarmonics .', 'config_table': 'outputfilePotentialCoefficients filename includes the loading maxDegree uint GM double Geocentric gravitational constant R double reference radius inputfileDensityGrid filename [kg/m^3] density of sea water, zero over land tideGeneratingPotential double [m^2/s^2] k20 double earth tide love number k20plus double earth tide love number h20_0 double earth tide love number h20_2 double earth tide love number inputfilePotentialLoadLoveNumber filename inputfileDeformationLoadLoveNumber filename iterationCount uint', 'display_text': 'Computes the equilibrium ocean tide of the long periodic tideGeneratingPotential. The spherical harmonics expansion up to maxDegree with GM and R is estimated using a least squares adjustment.
The inputfileDensityGrid must be a global regular grid with the vertically averaged seawater density over the ocean and zero over land.
Additionally the effects of the solid Earth tide are considered, both the gravitational (Love numbers k20, k20plus) and the geometrical (Love numbers h20,0, h20,2) effect.
'},
'PotentialCoefficients2DoodsonHarmonics': { 'name': 'PotentialCoefficients2DoodsonHarmonics', 'key': 'PotentialCoefficients2DoodsonHarmonics', 'description': 'Create a DoodsonHarmonic file from a list of cos/sin potentialCoefficients for given doodson (Doodson number or Darwin´s name, e.g. 255.555 or M2) tidal constituents. If applyXi the Doodson-Warburg phase correction (see IERS conventions) is applied before.', 'config_table': 'outputfileDoodsonHarmonics filename inputfileTideGeneratingPotential filename to compute Xi phase correction constituent sequence doodson doodson inputfileCosPotentialCoefficients filename inputfileSinPotentialCoefficients filename minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius applyXi boolean apply Doodson-Warburg phase correction (see IERS conventions)', 'display_text': 'Create a DoodsonHarmonic file from a list of cos/sin potentialCoefficients for given doodson (Doodson number or Darwin´s name, e.g. 255.555 or M2) tidal constituents. If applyXi the Doodson-Warburg phase correction (see IERS conventions) is applied before.'},
'GnssAntennaDefinition2ParameterVector': { 'name': 'GnssAntennaDefinition2ParameterVector', 'key': 'GnssAntennaDefinition2ParameterVector', 'description': 'Estimates parameters of a parametrization of Estimates parameters of a parametrization of , which represents all antennas from Estimates parameters of a parametrization of matching the wildcard patterns of name , serial , radome . The provided values at the area weighted grid points of the pattern of each gnssType are used as pseudo-observations. A subset of patterns can be selected with Estimates parameters of a parametrization of . The GnssAntennaDefinition file can be modified to the demands before with GnssAntennaDefinitionCreate . See also ParameterVector2GnssAntennaDefinition .', 'config_table': 'outputfileSolution filename outputfileParameterNames filename antennaCenterVariations parametrizationGnssAntennaType inputfileAntennaDefinition filename name string serial string radome string types gnssType if not set, all types in the file are used zeroNaN boolean treat NaN values as zero, otherwise values are ignored', 'display_text': 'Estimates parameters of a parametrization of antennaCenterVariations, which represents all antennas from inputfileAntennaDefinition matching the wildcard patterns of name, serial, radome.
The provided values at the area weighted grid points of the pattern of each gnssType are used as pseudo-observations. A subset of patterns can be selected with types.
See also ParameterVector2GnssAntennaDefinition.'},
'GnssAntennaDefinition2Skyplot': { 'name': 'GnssAntennaDefinition2Skyplot', 'key': 'GnssAntennaDefinition2Skyplot', 'description': 'Produce a skyplot of antenna center variations which can be plotted with PlotMap . The first antenna from Produce a matching the wildcard patterns of name , serial , radome is used. For each antenna pattern (gnssType) a separate data column is computed. A subset of patterns can be selected with Produce a . Azimuth and elevation are written as ellipsoidal longitude and latitude in a griddedData file . The choosen ellipsoid parameters R and inverseFlattening are arbitrary but should be the same as in Produce a and PlotMap .', 'config_table': 'outputfileGriddedData filename data column for each gnssType inputfileAntennaDefinition filename grid gridType name string serial string radome string types gnssType if not set, all types in the file are used R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'Produce a skyplot of antenna center variations which can be plotted with PlotMap.
The first antenna from inputfileAntennaDefinition matching the wildcard patterns of name, serial, radome is used.
For each antenna pattern (gnssType) a separate data column is computed. A subset of patterns can be selected with types.
Azimuth and elevation are written as ellipsoidal longitude and latitude in a griddedData file. The chosen ellipsoid parameters R and inverseFlattening are arbitrary but should be the same as in grid and PlotMap.
'},
'GnssAntennaDefinitionCreate': { 'name': 'GnssAntennaDefinitionCreate', 'key': 'GnssAntennaDefinitionCreate', 'description': 'Create a GNSS antenna definition file (Antenna Center Variations, ACV) consisting of multiple antennas. The antennas can be created from scratch or can be selected from existing files. This program can also be used to modify existing files. Furthermore it can be used to create accuracy definition files containing azimuth and elevation dependent accuracy values for antennas. To create an accuracy pattern for phase observations with 1 mm accuracy at zenith and no azimuth dependency, define a pattern with type = L , values = 0.001/cos(zenith/rho) . The antennas in Create a are sorted by names and duplicates are removed (first one is kept).', 'config_table': 'outputfileAntennaDefinition filename antenna gnssAntennaDefintionListType', 'display_text': 'Create a GNSS antenna definition file (Antenna Center Variations, ACV) consisting of multiple antennas. The antennas can be created from scratch or can be selected from existing files. This program can also be used to modify existing files.
Furthermore it can be used to create accuracy definition files containing azimuth and elevation dependent accuracy values for antennas. To create an accuracy pattern for phase observations with 1 mm accuracy at zenith and no azimuth dependency, define a pattern with type=L, values=0.001/cos(zenith/rho).
The antennas in outputfileAntennaDefinition are sorted by names and duplicates are removed (first one is kept).'},
'GnssAntennaNormalsConstraint': { 'name': 'GnssAntennaNormalsConstraint', 'key': 'GnssAntennaNormalsConstraint', 'description': 'Apply constraints to normal equations containing Apply constraints to . Usually the antenna center variations are estimated together with other parameters like station coordinates, signal biases and slant TEC in GnssProcessing . This results in a rank deficient matrix as not all parameters can be separated. The deficient can be solved by adding pseudo observation equations as constraints. To separate antenna center variations and signal biases apply constraint:mean for each GNSS Apply constraints to . The observation equation for the integral mean of antenna center variations (ACV) in all azimuth and elevation dependent directions is approximated by a grid defined by deltaAzimuth , deltaZenith , and maxZenith . To separate from station coordinates use constraint:centerMean and from slant TEC parameters use constraint:TEC . The constraints are applied separately to all antennas matching the wildcard patterns of name , serial , radome . 
See also ParameterVector2GnssAntennaDefinition .', 'config_table': 'outputfileNormalEquation filename with applied constraints inputfileNormalEquation filename constraint choice center sequence zero center (x,y,z) of a single pattern type gnssType applied for each matching types applyWeight boolean from normal equations sigma double [m] centerMean sequence zero center (x,y,z) as (weighted) mean of all patterns applyWeight boolean from normal equations sigma double [m] constant sequence zero constant (mean of all directions) of a single pattern type gnssType applied for each matching types applyWeight boolean from normal equations sigma double [m] constantMean sequence zero constant (mean of all directions) as (weighted) mean of all patterns applyWeight boolean from normal equations sigma double [m] TEC sequence zero TEC computed as (weighetd) least squares from all types type gnssType applied for combination of matching types applyWeight boolean from normal equations sigma double [TECU] antennaCenterVariations parametrizationGnssAntennaType antennaName string apply constraints to all machting antennas antennaSerial string apply constraints to all machting antennas antennaRadome string apply constraints to all machting antennas deltaAzimuth angle [degree] sampling of pattern to estimate center/constant deltaZenith angle [degree] sampling of pattern to estimate center/constant maxZenith angle [degree] sampling of pattern to estimate center/constant', 'display_text': 'Apply constraints to normal equations containing antennaCenterVariations. Usually the antenna center variations are estimated together with other parameters like station coordinates, signal biases and slant TEC in GnssProcessing. This results in a rank deficient matrix as not all parameters can be separated. The deficient can be solved by adding pseudo observation equations as constraints.
To separate antenna center variations and signal biases apply constraint:mean for each GNSS type. The observation equation for the integral mean of antenna center variations (ACV) in all azimuth $A$ and elevation $E$ dependent directions \\[ 0 = \\iint ACV(A,E)\\, d\\Phi \\approx \\sum_i ACV(A_i,E_i)\\, \\Delta\\Phi_i \\]is approximated by a grid defined by deltaAzimuth, deltaZenith, and maxZenith.
To separate from station coordinates use constraint:centerMean and from slant TEC parameters use constraint:TEC.
The constraints are applied separately to all antennas matching the wildcard patterns of name, serial, radome.
See also ParameterVector2GnssAntennaDefinition.'},
'GnssAttitudeInfoCreate': { 'name': 'GnssAttitudeInfoCreate', 'key': 'GnssAttitudeInfoCreate', 'description': 'Creates attitude info file ( Instrument(MISCVALUES) ) used by SimulateStarCameraGnss . One or more attitudeInfo s can be specified. They are valid from timeStart until the start of the subsequent attitudeInfo . maxManeuverTime is used by SimulateStarCameraGnss to look for ongoing orbit maneuvers before/after the given orbit that might affect the attitude at the beginning or end of a given orbit. Here is a list of GNSS satellite types for which the attitude behavior is known and their respective attitude modes and required parameters: GPS-II/IIA [1] defaultMode : nominalYawSteering midnightMode : shadowMaxYawSteeringAndRecovery noonMode : catchUpYawSteering maxYawRate : 0.12 deg/s yawBias : 0.5 deg maxManeuverTime : 2 h GPS-IIR/IIR-M [1] defaultMode : nominalYawSteering midnightMode : catchUpYawSteering noonMode : catchUpYawSteering maxYawRate : 0.2 deg/s maxManeuverTime : 30 min GPS-IIF [2] defaultMode : nominalYawSteering midnightMode : shadowConstantYawSteering noonMode : catchUpYawSteering maxYawRate : 0.11 deg/s yawBias : -0.7 deg maxManeuverTime : 1.5 h GLO-M [3] defaultMode : nominalYawSteering midnightMode : shadowMaxYawSteeringAndStop noonMode : centeredMaxYawSteering maxYawRate : 0.25 deg/s noonBetaThreshold : 2 deg maxManeuverTime : 1.5 h GAL-1 [4] defaultMode : nominalYawSteering midnightMode : smoothedYawSteering1 noonMode : smoothedYawSteering1 maxManeuverTime : 1.5 h GAL-2 [4] defaultMode : nominalYawSteering midnightMode : smoothedYawSteering2 noonMode : smoothedYawSteering2 midnightBetaThreshold : 4.1 deg noonBetaThreshold : 4.1 deg activationThreshold : 10 deg maxManeuverTime : 5656 s BDS-2G/3G [5, 6] defaultMode : orbitNormal midnightMode : orbitNormal noonMode : orbitNormal BDS-2I [5] defaultMode : nominalYawSteering midnightMode : betaDependentOrbitNormal noonMode : betaDependentOrbitNormal maxYawRate : 0.085 deg/s midnightBetaThreshold 
: 4 deg noonBetaThreshold : 4 deg activationThreshold : 5 deg maxManeuverTime : 24 h BDS-2M [5] defaultMode : nominalYawSteering midnightMode : betaDependentOrbitNormal noonMode : betaDependentOrbitNormal maxYawRate : 0.159 deg/s midnightBetaThreshold : 4 deg noonBetaThreshold : 4 deg activationThreshold : 5 deg maxManeuverTime : 13 h BDS-3I/3SI [6] defaultMode : nominalYawSteering midnightMode : smoothedYawSteering2 noonMode : smoothedYawSteering2 midnightBetaThreshold : 3 deg noonBetaThreshold : 3 deg activationThreshold : 6 deg maxManeuverTime : 5740 s BDS-3M/3SM [6] defaultMode : nominalYawSteering midnightMode : smoothedYawSteering2 noonMode : smoothedYawSteering2 midnightBetaThreshold : 3 deg noonBetaThreshold : 3 deg activationThreshold : 6 deg maxManeuverTime : 3090 s QZS-1 [7] defaultMode : nominalYawSteering midnightMode : betaDependentOrbitNormal noonMode : betaDependentOrbitNormal maxYawRate : 0.01 deg/s yawBias : 180 deg midnightBetaThreshold : 20 deg noonBetaThreshold : 20 deg activationThreshold : 18.5 deg maxManeuverTime : 24 h QZS-2G [7] defaultMode : orbitNormal midnightMode : orbitNormal noonMode : orbitNormal yawBias : 180 deg QZS-2I [7] defaultMode : nominalYawSteering midnightMode : centeredMaxYawSteering noonMode : centeredMaxYawSteering maxYawRate : 0.055 deg/s midnightBetaThreshold : 5 deg noonBetaThreshold : 5 deg maxManeuverTime : 1.5 h Some specific satellites may deviate in their attitude behavior or parameters (e.g. G013-G040, R713, C005, C015, C017, J001). References for the attitude behavior information: https://www.gsc-europa.eu/support-to-developers/galileo-satellite-metadata#3 https://qzss.go.jp/en/technical/qzssinfo/index.html', 'config_table': 'outputfileAttitudeInfo filename attitudeInfo sequence timeStart time defaultMode choice default attitude mode nominalYawSteering yaw to keep solar panels aligned to Sun (e.g. 
most GNSS satellites outside eclipse) orbitNormal keep fixed yaw angle, for example point X-axis in flight direction (e.g. BDS-2G, BDS-3G, QZS-2G) midnightMode choice attitude mode for maneuvers around orbit midnight nominalYawSteering yaw to keep solar panels aligned to Sun (e.g. most GNSS satellites outside eclipse) orbitNormal keep fixed yaw angle, for example point X-axis in flight direction (e.g. BDS-2G, BDS-3G, QZS-2G) catchUpYawSteering yaw at maximum yaw rate to catch up to nominal yaw angle (e.g. GPS-* (noon), GPS-IIR (midnight)) shadowMaxYawSteeringAndRecovery yaw at maximum yaw rate from shadow start to end, recover after shadow (e.g. GPS-IIA (midnight)) shadowMaxYawSteeringAndStop yaw at maximum yaw rate from shadow start until nominal yaw angle at shadow end is reached, then stop (e.g. GLO-M (midnight)) shadowConstantYawSteering yaw at constant yaw rate from shadow start to end (e.g. GPS-IIF (midnight)) centeredMaxYawSteering yaw at maximum yaw rate centered around noon/midnight (e.g. QZS-2I, GLO-M (noon)) smoothedYawSteering1 yaw based on an auxiliary Sun vector for a smooth yaw maneuver (e.g. GAL-1) smoothedYawSteering2 yaw based on a modified yaw-steering law for a smooth yaw maneuver (e.g. GAL-2, BDS-3M, BDS-3I) betaDependentOrbitNormal switch to orbit normal mode if below beta angle threshold (e.g. BDS-2M, BDS-2I, QZS-1) noonMode choice attitude mode for maneuvers around orbit noon nominalYawSteering yaw to keep solar panels aligned to Sun (e.g. most GNSS satellites outside eclipse) orbitNormal keep fixed yaw angle, for example point X-axis in flight direction (e.g. BDS-2G, BDS-3G, QZS-2G) catchUpYawSteering yaw at maximum yaw rate to catch up to nominal yaw angle (e.g. GPS-* (noon), GPS-IIR (midnight)) centeredMaxYawSteering yaw at maximum yaw rate centered around noon/midnight (e.g. QZS-2I, GLO-M (noon)) smoothedYawSteering1 yaw based on an auxiliary Sun vector for a smooth yaw maneuver (e.g. 
GAL-1) smoothedYawSteering2 yaw based on a modified yaw-steering law for a smooth yaw maneuver (e.g. GAL-2, BDS-3M, BDS-3I) betaDependentOrbitNormal switch to orbit normal mode if below beta angle threshold (e.g. BDS-2M, BDS-2I, QZS-1) maxYawRate double [degree/s] maximum yaw rate of the satellite yawBias double [degree] yaw bias applied in satellite attitude control system midnightBetaThreshold double [degree] limit midnight maneuver to this absolute angle of the Sun above/below the satellite orbital plane noonBetaThreshold double [degree] limit noon maneuver to this absolute angle of the Sun above/below the satellite orbital plane activationThreshold double [degree] limit maneuver to this yaw/Earth-spacecraft-Sun angle (depending on mode) maxManeuverTime double [s] maximum duration of maneuver or maximum maneuver lookup time before/after orbit start/end', 'display_text': 'Creates attitude info file (Instrument(MISCVALUES)) used by SimulateStarCameraGnss. One or more attitudeInfos can be specified. They are valid from timeStart until the start of the subsequent attitudeInfo. maxManeuverTime is used by SimulateStarCameraGnss to look for ongoing orbit maneuvers before/after the given orbit that might affect the attitude at the beginning or end of a given orbit.
Here is a list of GNSS satellite types for which the attitude behavior is known and their respective attitude modes and required parameters:
GPS-II/IIA [1]
defaultMode: nominalYawSteering
midnightMode: shadowMaxYawSteeringAndRecovery
noonMode: catchUpYawSteering
maxYawRate: 0.12 deg/s
yawBias: 0.5 deg
maxManeuverTime: 2 h
GPS-IIR/IIR-M [1]
defaultMode: nominalYawSteering
midnightMode: catchUpYawSteering
noonMode: catchUpYawSteering
maxYawRate: 0.2 deg/s
maxManeuverTime: 30 min
GPS-IIF [2]
defaultMode: nominalYawSteering
midnightMode: shadowConstantYawSteering
noonMode: catchUpYawSteering
maxYawRate: 0.11 deg/s
yawBias: -0.7 deg
maxManeuverTime: 1.5 h
GLO-M [3]
defaultMode: nominalYawSteering
midnightMode: shadowMaxYawSteeringAndStop
noonMode: centeredMaxYawSteering
maxYawRate: 0.25 deg/s
noonBetaThreshold: 2 deg
maxManeuverTime: 1.5 h
GAL-1 [4]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering1
noonMode: smoothedYawSteering1
maxManeuverTime: 1.5 h
GAL-2 [4]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering2
noonMode: smoothedYawSteering2
midnightBetaThreshold: 4.1 deg
noonBetaThreshold: 4.1 deg
activationThreshold: 10 deg
maxManeuverTime: 5656 s
BDS-2G/3G [5, 6]
defaultMode: orbitNormal
midnightMode: orbitNormal
noonMode: orbitNormal
BDS-2I [5]
defaultMode: nominalYawSteering
midnightMode: betaDependentOrbitNormal
noonMode: betaDependentOrbitNormal
maxYawRate: 0.085 deg/s
midnightBetaThreshold: 4 deg
noonBetaThreshold: 4 deg
activationThreshold: 5 deg
maxManeuverTime: 24 h
BDS-2M [5]
defaultMode: nominalYawSteering
midnightMode: betaDependentOrbitNormal
noonMode: betaDependentOrbitNormal
maxYawRate: 0.159 deg/s
midnightBetaThreshold: 4 deg
noonBetaThreshold: 4 deg
activationThreshold: 5 deg
maxManeuverTime: 13 h
BDS-3I/3SI [6]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering2
noonMode: smoothedYawSteering2
midnightBetaThreshold: 3 deg
noonBetaThreshold: 3 deg
activationThreshold: 6 deg
maxManeuverTime: 5740 s
BDS-3M/3SM [6]
defaultMode: nominalYawSteering
midnightMode: smoothedYawSteering2
noonMode: smoothedYawSteering2
midnightBetaThreshold: 3 deg
noonBetaThreshold: 3 deg
activationThreshold: 6 deg
maxManeuverTime: 3090 s
QZS-1 [7]
defaultMode: nominalYawSteering
midnightMode: betaDependentOrbitNormal
noonMode: betaDependentOrbitNormal
maxYawRate: 0.01 deg/s
yawBias: 180 deg
midnightBetaThreshold: 20 deg
noonBetaThreshold: 20 deg
activationThreshold: 18.5 deg
maxManeuverTime: 24 h
QZS-2G [7]
defaultMode: orbitNormal
midnightMode: orbitNormal
noonMode: orbitNormal
yawBias: 180 deg
QZS-2I [7]
defaultMode: nominalYawSteering
midnightMode: centeredMaxYawSteering
noonMode: centeredMaxYawSteering
maxYawRate: 0.055 deg/s
midnightBetaThreshold: 5 deg
noonBetaThreshold: 5 deg
maxManeuverTime: 1.5 h
Some specific satellites may deviate in their attitude behavior or parameters (e.g. G013-G040, R713, C005, C015, C017, J001).
'},
'GnssBiasClockAlignment': { 'name': 'GnssBiasClockAlignment', 'key': 'GnssBiasClockAlignment', 'description': 'This program can be used to absolutely align GNSS transmitter clocks to reference clocks (i.e. broadcast clocks). Each \'group\' of transmitter s, usually a system like GPS or Galileo, is aligned individually by a constant shift over all transmitters. If alignClocksByFreqNo is set, GLONASS transmitters will be divided by frequency number into groups of nominally two transmitters. The offset between clocks and reference clocks will be shifted into receiver code biases, if receiver is provided." By setting alignFreqNoBiasesAtReceiver and providing receiver , this program can further align GLONASS transmitter signal biases so that the differences between frequency number-dependent receiver signal biases are minimal, which helps if PPP users don\'t set up individual signal biases per frequency number at the receiver. Alignment is done by computing signal bias residuals to the mean over all frequency numbers of a signal type at each receiver and then computing the means over all receivers for each frequency number and shifting those from the receiver signal biases to the transmitter signal biases. Internal consistency of the biases is not affected by this. If you only want to align GLONASS frequency numbers, provide the same clocks in This program can be used to absolutely align GNSS transmitter clocks to reference clocks (i.e. broadcast clocks). Each \'group\' of and This program can be used to absolutely align GNSS transmitter clocks to reference clocks (i.e. broadcast clocks). 
Each \'group\' of .', 'config_table': 'transmitter sequence one element per satellite outputfileClock filename aligned clock instrument file outputfileSignalBias filename (GLONASS only) aligned signal bias file inputfileClock filename clock instrument file inputfileReferenceClock filename reference clock instrument file inputfileSignalBias filename (GLONASS only) signal bias file inputfileTransmitterInfo filename transmitter platform file receiver sequence one element per station outputfileSignalBias filename aligned signal bias file inputfileSignalBias filename signal bias file alignClocksByFreqNo boolean align clocks for each GLONASS frequency number separately alignFreqNoBiasesAtReceiver boolean align frequency number-dependent code biases for each receiver', 'display_text': 'This program can be used to absolutely align GNSS transmitter clocks to reference clocks (i.e. broadcast clocks). Each \'group\' of transmitters, usually a system like GPS or Galileo, is aligned individually by a constant shift over all transmitters. If alignClocksByFreqNo is set, GLONASS transmitters will be divided by frequency number into groups of nominally two transmitters. The offset between clocks and reference clocks will be shifted into receiver code biases, if receiver is provided."
By setting alignFreqNoBiasesAtReceiver and providing receiver, this program can further align GLONASS transmitter signal biases so that the differences between frequency number-dependent receiver signal biases are minimal, which helps if PPP users don\'t set up individual signal biases per frequency number at the receiver. Alignment is done by computing signal bias residuals to the mean over all frequency numbers of a signal type at each receiver and then computing the means over all receivers for each frequency number and shifting those from the receiver signal biases to the transmitter signal biases. Internal consistency of the biases is not affected by this.
If you only want to align GLONASS frequency numbers, provide the same clocks in inputfileClock and inputfileReferenceClock.'},
'GnssEstimateClockShift': { 'name': 'GnssEstimateClockShift', 'key': 'GnssEstimateClockShift', 'description': 'This program estimates an epoch-wise clock shift in a constellation of GNSS satellites. Each separate data represents a satellite... (e.g. 32 GPS satellites). The shift to reference clocks can be estimated by providing This program estimates an epoch-wise clock shift in a constellation of GNSS satellites. Each separate . Clock shifts are estimated for each epoch given by This program estimates an epoch-wise clock shift in a constellation of GNSS satellites. Each separate .', 'config_table': 'outputfileShiftTimeSeries filename columns: mjd, clock shift data sequence e.g. satellite outputfileInstrument filename corrected clocks outputfileInstrumentDiff filename clock difference after correction inputfileInstrument filename input clocks inputfileInstrumentRef filename reference clocks (subtracted from input clocks) timeSeries timeSeriesType clock epochs margin double [s] margin for time comparison', 'display_text': 'This program estimates an epoch-wise clock shift in a constellation of GNSS satellites. Each separate data represents a satellite... (e.g. 32 GPS satellites). The shift to reference clocks can be estimated by providing inputfileInstrumentRef. Clock shifts are estimated for each epoch given by timeSeries.'},
'GnssGlonassFrequencyNumberUpdate': { 'name': 'GnssGlonassFrequencyNumberUpdate', 'key': 'GnssGlonassFrequencyNumberUpdate', 'description': 'Update/set GLONASS frequency number in Update/set GLONASS frequency number in files. The Update/set GLONASS frequency number in can be generated with SinexMetadata2GlonassFrequencyNumber . See also GnssAntex2AntennaDefinition .', 'config_table': 'outputfileTransmitterInfo filename templated for PRN list (variableNamePrn) inputfileTransmitterInfo filename templated for PRN list (variableNamePrn) inputfilePrn2FrequencyNumber filename matrix with columns: GLONASS PRN, SVN, mjdStart, mjdEnd, frequencyNumber prn string PRN (e.g. R01) for transmitter info files variableNamePrn string variable name for PRN in transmitter info files', 'display_text': 'Update/set GLONASS frequency number in inputfileTransmitterInfo files. The inputfilePrn2FrequencyNumber can be generated with SinexMetadata2GlonassFrequencyNumber.
See also GnssAntex2AntennaDefinition.'},
'GnssProcessing': { 'name': 'GnssProcessing', 'key': 'GnssProcessing', 'description': 'This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. The primary use cases of this program are: GNSS satellite orbit determination and station network analysis Kinematic orbit determination of LEO satellites GNSS precise point positioning (PPP) The observation epochs are defined by This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. and only observations at these epochs (within a timeMargin ) are considered. To calculate observation equations from the tracks, the model parameters or unknown parameters need to be defined beforehand. These unknown parameters can be chosen arbitrarily by the user with an adequate list of defined This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. . Some of the This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. also include a priori models. Lastly it is required to define the process flow of the gnssProcessing. This is accomplished with a list of This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. . Each step is processed consecutively. Some steps allow the selection of parameters, epochs, or the normal equation structure, which affects all subsequent steps. A minimal example consists of following steps: This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. 
: iterative float solution with outlier downeighting This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. : fix ambiguities to integer and remove them from the normals This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. : few iteration for final outlier downweighting This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. : write the output files defined in This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. If the program is run on multiple processes the This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. s (stations or LEO satellites) are distributed over the processes. See also GnssSimulateReceiver .', 'config_table': 'timeSeries timeSeriesType defines observation epochs timeMargin double [seconds] margin to consider two times identical transmitter gnssTransmitterGeneratorType constellation of GNSS satellites receiver gnssReceiverGeneratorType ground station network or LEO satellite earthRotation earthRotationType apriori earth rotation parametrization gnssParametrizationType models and parameters processingStep gnssProcessingStepType steps are processed consecutively', 'display_text': 'This program processes GNSS observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it.
The observation epochs are defined by timeSeries and only observations at these epochs (within a timeMargin) are considered.
To calculate observation equations from the tracks, the model parameters or unknown parameters need to be defined beforehand. These unknown parameters can be chosen arbitrarily by the user with an adequate list of defined parametrization. Some of the parametrization also include a priori models.
Lastly it is required to define the process flow of the gnssProcessing. This is accomplished with a list of processingSteps. Each step is processed consecutively. Some steps allow the selection of parameters, epochs, or the normal equation structure, which affects all subsequent steps. A minimal example consists of following steps:
estimate: iterative float solution with outlier downweighting
resolveAmbiguities: fix ambiguities to integer and remove them from the normals
estimate: a few iterations for final outlier downweighting
If the program is run on multiple processes the receivers (stations or LEO satellites) are distributed over the processes.
See also GnssSimulateReceiver.'},
'GnssReceiverDefinitionCreate': { 'name': 'GnssReceiverDefinitionCreate', 'key': 'GnssReceiverDefinitionCreate', 'description': 'Create a GNSS receiver definition file.', 'config_table': 'outputfileGnssReceiverDefinition filename receiverDefinition sequence name string serial string version string comment string gnssType gnssType', 'display_text': 'Create a GNSS receiver definition file.'},
'GnssResiduals2AccuracyDefinition': { 'name': 'GnssResiduals2AccuracyDefinition', 'key': 'GnssResiduals2AccuracyDefinition', 'description': 'Compute antenna accuracies from observation Compute antenna accuracies from observation . The Compute antenna accuracies from observation is needed to assign the residuals to the equipped antenna at observation times. The Compute antenna accuracies from observation contains at first step the same accuracy information for all antennas as the input file. Only the azimuth and elevation dependent grid points of the patterns where enough residuals are available ( minRedundancy ) are replaced by estimated accuracy where are the azimuth and elevation dependent residuals and the corresponding redundancies (number of observations minus the contribution to the estimated parameters). The Compute antenna accuracies from observation can be modified to the demands before with GnssAntennaDefinitionCreate (e.g. with antenna:resample ). To verify the results the Compute antenna accuracies from observation and the accumulated Compute antenna accuracies from observation of the computed pattern grid points can be written. Example: Analysis of TerraSAR-X residuals of one month shows that low elevation GPS satellites are not tracked by the onboard receiver. An estimation of accuracies for these directions is not possible from the residuals and the apriori accuracies are left untouched. The other directions show very low phase noise hardly elevation and azimuth dependent for L2W. A nearly zero mean indicates the use of adequate antennca center variations in the processing. 
See also GnssResiduals2TransmitterAccuracyDefinition .', 'config_table': 'outputfileAccuracyDefinition filename elevation and azimuth dependent accuracy outputfileAntennaMean filename weighted mean of the residuals outputfileAntennaRedundancy filename redundancy of adjustment inputfileAccuracyDefinition filename apriori accuracies inputfileStationInfo filename to assign residuals to antennas isTransmitter boolean stationInfo is of a transmitter thresholdOutlier double ignore residuals with sigma/sigma0 greater than threshold minRedundancy double min number of residuals. to estimate sigma inputfileResiduals filename GNSS receiver residuals', 'display_text': 'Compute antenna accuracies from observation inputfileResiduals. The inputfileStationInfo is needed to assign the residuals to the equipped antenna at observation times.
The outputfileAccuracyDefinition contains at first step the same accuracy information for all antennas as the input file. Only the azimuth $A$ and elevation $E$ dependent grid points of the patterns where enough residuals are available ($>$ minRedundancy) are replaced by estimated accuracy \\[ \\sigma(A,E) = \\sqrt{\\frac{\\sum_i e_i^2(A,E)}{\\sum_i r_i(A,E)}}, \\]where $e_i$ are the azimuth and elevation dependent residuals and $r_i$ the corresponding redundancies (number of observations minus the contribution to the estimated parameters).
Example: Analysis of TerraSAR-X residuals of one month shows that low elevation GPS satellites are not tracked by the onboard receiver. An estimation of accuracies for these directions is not possible from the residuals and the apriori accuracies are left untouched. The other directions show very low phase noise that is hardly elevation and azimuth dependent for L2W. A nearly zero mean indicates the use of adequate antenna center variations in the processing.
See also GnssResiduals2TransmitterAccuracyDefinition.'},
'GnssResiduals2Skyplot': { 'name': 'GnssResiduals2Skyplot', 'key': 'GnssResiduals2Skyplot', 'description': 'Write GNSS residuals together with azimuth and elevation to be plotted with PlotMap . Azimuth and elevation are written as ellipsoidal longitude and latitude in a griddedData file . The choosen ellipsoid parameters R and inverseFlattening are arbitrary but should be the same as in PlotMap . If with Write GNSS residuals together with azimuth and elevation to be plotted with (e.g. \' ***G18 \') a single transmitter is selected the azimuth and elevation are computed from the transmitter point of view. For each GNSS Write GNSS residuals together with azimuth and elevation to be plotted with an extra data column is created. A GNSS residual file includes additional information besides the residuals, which can also be selected with Write GNSS residuals together with azimuth and elevation to be plotted with A1* , E1* : azimuth and elevation at receiver A2* , E2* : azimuth and elevation at transmitter I** : Estimated slant total electron content (STEC) Furthermore these files may include for each residual Write GNSS residuals together with azimuth and elevation to be plotted with information about the redundancy and the accuracy relation of the estimated versus the apriori from the least squares adjustment. The 3 values (residuals, redundancy, ) are coded with the same type. To get access to all values the corresponding type must be repeated in Write GNSS residuals together with azimuth and elevation to be plotted with .', 'config_table': 'outputfileGriddedData filename type gnssType typeTransmitter gnssType choose transmitter view, e.g. \'***G18\' inputfileResiduals filename GNSS receiver residuals R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'Write GNSS residuals together with azimuth and elevation to be plotted with PlotMap. 
Azimuth and elevation are written as ellipsoidal longitude and latitude in a griddedData file. The chosen ellipsoid parameters R and inverseFlattening are arbitrary but should be the same as in PlotMap. If with typeTransmitter (e.g. \'***G18\') a single transmitter is selected, the azimuth and elevation are computed from the transmitter point of view.
For each GNSS type an extra data column is created.
A GNSS residual file includes additional information besides the residuals, which can also be selected with type:
A1*, E1*: azimuth and elevation at receiver
A2*, E2*: azimuth and elevation at transmitter
I**: Estimated slant total electron content (STEC)
Furthermore these files may include for each residual type information about the redundancy and the accuracy relation $\\sigma/\\sigma_0$ of the estimated $\\sigma$ versus the apriori $\\sigma_0$ from the least squares adjustment. The 3 values (residuals, redundancy, $\\sigma/\\sigma_0$) are coded with the same type. To get access to all values the corresponding type must be repeated in type.
'},
'GnssResiduals2TransmitterAccuracyDefinition': { 'name': 'GnssResiduals2TransmitterAccuracyDefinition', 'key': 'GnssResiduals2TransmitterAccuracyDefinition', 'description': 'Compute antenna accuracies from observation Compute antenna accuracies from observation . The Compute antenna accuracies from observation is needed to assign the residuals to the equipped antenna at observation times. The Compute antenna accuracies from observation contains at first step the same accuracy information for all antennas as the input file. Only the azimuth and elevation dependent grid points of the patterns where enough residuals are available ( minRedundancy ) are replaced by estimated accuracy where are the azimuth and elevation dependent residuals and the corresponding redundancies (number of observations minus the contribution to the estimated parameters). The Compute antenna accuracies from observation can be modified to the demands before with GnssAntennaDefinitionCreate (e.g. with antenna:resample ). To verify the results the Compute antenna accuracies from observation and the accumulated Compute antenna accuracies from observation of the computed pattern grid points can be written. See also GnssResiduals2AccuracyDefinition .', 'config_table': 'outputfileAccuracyDefinition filename elevation and azimuth dependent accuracy outputfileAntennaMean filename weighted mean of the residuals outputfileAntennaRedundancy filename redundancy of adjustment inputfileAccuracyDefinition filename apriori accuracies inputfileTransmitterInfo filename to assign residuals to antennas minRedundancy double min number of residuals. to estimate sigma inputfileResiduals filename GNSS receiver residuals', 'display_text': 'Compute antenna accuracies from observation inputfileResiduals. The inputfileTransmitterInfo is needed to assign the residuals to the equipped antenna at observation times.
The outputfileAccuracyDefinition contains at first step the same accuracy information for all antennas as the input file. Only the azimuth $A$ and elevation $E$ dependent grid points of the patterns where enough residuals are available ($>$ minRedundancy) are replaced by estimated accuracy \\[ \\sigma(A,E) = \\sqrt{\\frac{\\sum_i e_i^2(A,E)}{\\sum_i r_i(A,E)}}, \\]where $e_i$ are the azimuth and elevation dependent residuals and $r_i$ the corresponding redundancies (number of observations minus the contribution to the estimated parameters).
See also GnssResiduals2AccuracyDefinition.'},
'GnssSignalBias2Matrix': { 'name': 'GnssSignalBias2Matrix', 'key': 'GnssSignalBias2Matrix', 'description': 'Computes signal biases for a given list of Computes signal biases for a given list of . If the type list is empty, all types contained in Computes signal biases for a given list of are used. The resulting Computes signal biases for a given list of contains a vector with an entry for each type.', 'config_table': 'outputfileMatrix filename outputfileTypes filename ASCII list of types inputfileSignalBias filename types gnssType if not set, all types in the file are used', 'display_text': 'Computes signal biases for a given list of types. If the type list is empty, all types contained in inputfileSignalBias are used. The resulting outputfileMatrix contains a vector with an entry for each type.'},
'GnssSimulateReceiver': { 'name': 'GnssSimulateReceiver', 'key': 'GnssSimulateReceiver', 'description': 'This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in GnssProcessing , for example to conduct closed-loop simulations. One or more GNSS constellations must be defined via This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in . Receivers such as ground station networks or Low Earth Orbit (LEO) satellites can be defined via This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in . If multiple receivers defined an This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in and This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in are written for each single receiver with the variable {station} being replaced by the receiver name. A list of simulated observation types can be defined via This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in . Noise can be added to both observations and clock errors via This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in and This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in , respectively. Observation noise is interpreted as a factor that is multiplied to the accuracy derived from the accuracy pattern of the respective observation type (see This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in in This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in ). 
The This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in are used to simulate a priori models (e.g. troposphere, signal biases). Parameter settings and outputfiles are ignored. If the program is run on multiple processes the This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in s (stations or LEO satellites) are distributed over the processes.', 'config_table': 'outputfileGnssReceiver filename variable {station} available, simulated observations outputfileClock filename variable {station} available, simulated receiver clock errors timeSeries timeSeriesType defines observation epochs timeMargin double [seconds] margin to consider two times identical transmitter gnssTransmitterGeneratorType constellation of GNSS satellites receiver gnssReceiverGeneratorType ground station network or LEO satellite earthRotation earthRotationType apriori earth rotation parametrization gnssParametrizationType models and parameters observationType gnssType simulated observation types noiseObservation noiseGeneratorType [-] noise is multiplied with type accuracy pattern of receiver noiseClockReceiver noiseGeneratorType [m] noise added to the simulated receiver clock', 'display_text': 'This program simulates observations from receivers to GNSS satellites. These simulated observations can then be used in GnssProcessing, for example to conduct closed-loop simulations.
One or more GNSS constellations must be defined via transmitter. Receivers such as ground station networks or Low Earth Orbit (LEO) satellites can be defined via receiver.
A list of simulated observation types can be defined via observationType. Noise can be added to both observations and clock errors via noiseObservation and noiseClockReceiver, respectively. Observation noise is interpreted as a factor that is multiplied to the accuracy derived from the accuracy pattern of the respective observation type (see inputfileAccuracyDefinition in receiver).
The parametrization are used to simulate a priori models (e.g. troposphere, signal biases). Parameter settings and outputfiles are ignored.
If the program is run on multiple processes the receivers (stations or LEO satellites) are distributed over the processes.'},
'InstrumentGnssReceiver2TimeSeries': { 'name': 'InstrumentGnssReceiver2TimeSeries', 'key': 'InstrumentGnssReceiver2TimeSeries', 'description': 'Convert selected GNSS observations or residuals into a simpler time series format. The outputfileTimeSeries is an instrument file (MISCVALUES). For each epoch the first data column contains the PRN, the second the satellite system, followed by a column for each GNSS Convert selected GNSS observations or residuals into a simpler time series format. The . As normally more than one GNSS transmitter is tracked per epoch, the output file has several lines per observed epoch (epochs with the same time, one for each transmitter). The second data column of the output contains a number representating the system 71: \'G\', GPS 82: \'R\', GLONASS 69: \'E\', GALILEO 67: \'C\', BDS 83: \'S\', SBAS 74: \'J\', QZSS 73: \'I\', IRNSS . A GNSS residual file includes additional information besides the residuals, which can also be selected with Convert selected GNSS observations or residuals into a simpler time series format. The A1* , E1* : azimuth and elevation at receiver A2* , E2* : azimuth and elevation at transmitter I** : Estimated slant total electron content (STEC) Furthermore these files may include for each residual Convert selected GNSS observations or residuals into a simpler time series format. The information about the redundancy and the accuracy relation of the estimated versus the apriori from the least squares adjustment. The three values (residuals, redundancy, ) are coded with the same type. To get access to all values the corresponding type must be repeated in Convert selected GNSS observations or residuals into a simpler time series format. The . Example: Selected GPS phase residuals ( Convert selected GNSS observations or residuals into a simpler time series format. The =\' L1*G \' and Convert selected GNSS observations or residuals into a simpler time series format. The =\' L2*G \'). 
Plotted with PlotGraph with two Convert selected GNSS observations or residuals into a simpler time series format. The ( valueX =\' data0 \', valueY =\' 100*data3+data1 \' and valueY =\' 100*data4+data1 \' respectively).', 'config_table': 'outputfileTimeSeries filename Instrument (MISCVALUES): prn, system, values for each type inputfileGnssReceiver filename GNSS receiver observations or residuals type gnssType', 'display_text': 'Convert selected GNSS observations or residuals into a simpler time series format. The outputfileTimeSeries is an instrument file (MISCVALUES). For each epoch the first data column contains the PRN, the second the satellite system, followed by a column for each GNSS type. As normally more than one GNSS transmitter is tracked per epoch, the output file has several lines per observed epoch (epochs with the same time, one for each transmitter).
The second data column of the output contains a number representing the system
71: \'G\', GPS
82: \'R\', GLONASS
69: \'E\', GALILEO
67: \'C\', BDS
83: \'S\', SBAS
74: \'J\', QZSS
73: \'I\', IRNSS.
A GNSS residual file includes additional information besides the residuals, which can also be selected with type:
A1*, E1*: azimuth and elevation at receiver
A2*, E2*: azimuth and elevation at transmitter
I**: Estimated slant total electron content (STEC)
Furthermore these files may include for each residual type information about the redundancy and the accuracy relation $\\sigma/\\sigma_0$ of the estimated $\\sigma$ versus the apriori $\\sigma_0$ from the least squares adjustment. The three values (residuals, redundancy, $\\sigma/\\sigma_0$) are coded with the same type. To get access to all values the corresponding type must be repeated in type.
Example: Selected GPS phase residuals (type=\'L1*G\' and type=\'L2*G\'). Plotted with PlotGraph with two layer:linesAndPoints (valueX=\'data0\', valueY=\'100*data3+data1\' and valueY=\'100*data4+data1\' respectively).'},
'ParameterVector2GnssAntennaDefinition': { 'name': 'ParameterVector2GnssAntennaDefinition', 'key': 'ParameterVector2GnssAntennaDefinition', 'description': 'Updates a GnssAntennaDefinition file with estimated parameters which belong to the parametrization Updates an . The Updates an contains all antennas from Updates an . The antenna center variations represented by the Updates an are added to the matching antennas. The GnssAntennaDefinition file can be modified to the demands before with GnssAntennaDefinitionCreate . The following steps are used to estimate antenna center variations: GnssAntennaDefinitionCreate or GnssAntex2AntennaDefinition GnssProcessing with inputfileAntennaDefinition as apriori and writing normal equations with parametrization of Updates an NormalsEliminate : eliminate all other than antenna parameters NormalsAccumulate : accumulate normals over a sufficiently long period GnssAntennaNormalsConstraint : constrain unsolvable parameter linear combinations NormalsSolverVCE : estimate the parameter vector ParameterVector2GnssAntennaDefinition : update inputfileAntennaDefinition See also ParameterVector2GnssAntennaDefinition , GnssAntennaNormalsConstraint .', 'config_table': 'outfileAntennaDefinition filename all apriori antennas inputfileAntennaDefinition filename apriori antennas antennaCenterVariations parametrizationGnssAntennaType inputfileSolution filename inputfileParameterNames filename', 'display_text': 'Updates a GnssAntennaDefinition file with estimated parameters which belong to the parametrization antennaCenterVariations. The outfileAntennaDefinition contains all antennas from inputfileAntennaDefinition. The antenna center variations represented by the inputfileSolution are added to the matching antennas.
See also ParameterVector2GnssAntennaDefinition, GnssAntennaNormalsConstraint.'},
'EnsembleAveragingScaleModel': { 'name': 'EnsembleAveragingScaleModel', 'key': 'EnsembleAveragingScaleModel', 'description': 'This program estimates satellite-to-satellite-tracking (SST) deterministic signals due to eclipse transits from residuals. The ensemble averaging method is used to characterize the average properties of signal shapes across all transit events. Each shape is assigned to one arc of 3 hours (default). This can be modified by enabling averagingInterval .', 'config_table': 'outputfileScaleModel filename inputfileGrace1EclipseFactor filename GRACE-A eclipse factors computed with integrated orbit inputfileGrace2EclipseFactor filename GRACE-B eclipse factors computed with integrated orbit inputfileGraceResiduals filename SST Residuals timeMargin uint epochs before eclipse mode waveLength uint length of the sample wave averagingInterval sequence nearestNeighborNumber uint', 'display_text': 'This program estimates satellite-to-satellite-tracking (SST) deterministic signals due to eclipse transits from residuals. The ensemble averaging method is used to characterize the average properties of signal shapes across all transit events. Each shape is assigned to one arc of 3 hours (default). This can be modified by enabling averagingInterval.'},
'GraceAntennaCenterCorrectionArcCovariance': { 'name': 'GraceAntennaCenterCorrectionArcCovariance', 'key': 'GraceAntennaCenterCorrectionArcCovariance', 'description': 'This program computes covariance information for the non-stationary noise of the KBR antenna offset correction (AOC) from the orientation covariance matrices provided in Level-1B products via variance propagation. By using the output outputfileSatelliteTrackingCovariance in PreprocessingSst , noise model distinguishes between the stationary noise of ranging observations and the nonstationary AOC noise. The covariances are derived from the partial derivative of the AOC w.r.t. the roll/pitch/yaw rotations and star camera covariances inputfileScaCovariance1 and inputfileScaCovariance2 . 
The covariances for the range-rates and range-acceleration are computed by differentiating an interpolation polynomial of degree interpolationDegree .', 'config_table': 'outputfileSatelliteTrackingCovariance filename corrections for range, range-rate, and range-accelerations sstType choice range rangeRate rangeAcceleration inputfileOrbit1 filename inputfileOrbit2 filename inputfileStarCamera1 filename inputfileStarCamera2 filename inputfileScaCovariance1 filename inputfileScaCovariance2 filename sigmaAccelerometerX double [rad/s^2] sigmaAccelerometerY double [rad/s^2] sigmaAccelerometerZ double [rad/s^2] antennaCenters choice KBR antenna phase center value sequence center1X double x-coordinate of antenna position in SRF [m] for GRACEA center1Y double y-coordinate of antenna position in SRF [m] for GRACEA center1Z double z-coordinate of antenna position in SRF [m] for GRACEA center2X double x-coordinate of antenna position in SRF [m] for GRACEB center2Y double y-coordinate of antenna position in SRF [m] for GRACEB center2Z double z-coordinate of antenna position in SRF [m] for GRACEB file sequence inputAntennaCenters filename interpolationDegree uint differentiation by polynomial approximation of degree n', 'display_text': 'This program computes covariance information for the non-stationary noise of the KBR antenna offset correction (AOC) from the orientation covariance matrices provided in Level-1B products via variance propagation. By using the output outputfileSatelliteTrackingCovariance in PreprocessingSst, noise model distinguishes between the stationary noise of ranging observations and the nonstationary AOC noise.
The covariances for the range-rates and range-acceleration are computed by differentiating an interpolation polynomial of degree interpolationDegree.'},
'GraceOrbit2TransplantTimeOffset': { 'name': 'GraceOrbit2TransplantTimeOffset', 'key': 'GraceOrbit2TransplantTimeOffset', 'description': 'This program computes the time shift between two co-orbiting satellites based on dynamic orbit data. When applied to data of the first satellite, the computed time shift virtually shifts data of first satellite into the location of the second satellite. Note that inputfileOrbit1 and inputfileOrbit2 need velocity and acceleration data, which can be computed with OrbitAddVelocityAndAcceleration . The program tries to find a minimum of the objective function by applying Newton\'s method to the first derivative, thus iteratively computing This iteration is stopped when the difference between two consecutive time shift values falls below threshold or maximumIterations is reached. An initialGuess of the time shift can speed up convergence. See also OrbitAddVelocityAndAcceleration and InstrumentApplyTimeOffset .', 'config_table': 'outputfileTimeOffset filename estimated time offset in seconds (MISCVALUE) inputfileOrbit1 filename orbit data of satellite 1 inputfileOrbit2 filename orbit data of satellite 2 interpolationDegree uint polynomial degree for the interpolation of position, velocity and acceleration initialGuess double initial guess for the time shift [seconds] maximumIterations uint maximum number of iterations threshold double when the maximum difference between two iterations is below this value, stop [seconds]', 'display_text': 'This program computes the time shift between two co-orbiting satellites based on dynamic orbit data. When applied to data of the first satellite, the computed time shift virtually shifts data of first satellite into the location of the second satellite. Note that inputfileOrbit1 and inputfileOrbit2 need velocity and acceleration data, which can be computed with OrbitAddVelocityAndAcceleration. 
The program tries to find a minimum of the objective function \\[ f(\\Delta t) = \\| r_1(t) - r_2(t + \\Delta t) \\|^2, \\]by applying Newton\'s method to the first derivative, thus iteratively computing \\[ \\Delta t_{k+1} = \\Delta t_k + \\frac{f\'(\\Delta t_k)}{f\'\'(\\Delta t_k)}. \\]This iteration is stopped when the difference between two consecutive time shift values falls below threshold or maximumIterations is reached. An initialGuess of the time shift can speed up convergence.
See also OrbitAddVelocityAndAcceleration and InstrumentApplyTimeOffset.'},
'GraceSstResidualAnalysis': { 'name': 'GraceSstResidualAnalysis', 'key': 'GraceSstResidualAnalysis', 'description': 'This program applies the Multi-Resolution Analysis (MRA) using Discrete Wavelet Transform (DWT) to the monthly GRACE SST post-fit residuals. First, the residuals are transferred into wavelet domain by applying an 8 level Daubechies wavelet transform (default). In the next step, detail coefficients are merged into three major groups due to their approximate frequency subbands: Low scale details, corresponding to the frequency band above 10 mHz; Intermediate scale details, corresponding to the approximate frequency range above 3 mHz up to 10 mHz; High scale details, corresponding to the approximate frequency range above 0.5 mHz up to 10 mHz. In the last step, each group is reconstructed back into time domain.', 'config_table': 'outputfileInstrumentHighScale filename High scale details outputfileInstrumentMidScale filename Intermediate scale details outputfileInstrumentLowScale filename Low scale details inputfileInstrument filename GRACE SST Residuals inputfileWavelet filename wavelet coefficients', 'display_text': 'This program applies the Multi-Resolution Analysis (MRA) using Discrete Wavelet Transform (DWT) to the monthly GRACE SST post-fit residuals. First, the residuals are transferred into wavelet domain by applying an 8 level Daubechies wavelet transform (default). In the next step, detail coefficients are merged into three major groups due to their approximate frequency subbands:
Low scale details, corresponding to the frequency band above 10 mHz;
Intermediate scale details, corresponding to the approximate frequency range above 3 mHz up to 10 mHz;
High scale details, corresponding to the approximate frequency range above 0.5 mHz up to 10 mHz.
In the last step, each group is reconstructed back into time domain.'},
'GraceSstScaleModel': { 'name': 'GraceSstScaleModel', 'key': 'GraceSstScaleModel', 'description': 'This program estimates satellite-to-satellite-tracking (SST) deterministic signals due to eclipse transits and low-SNR values from post-fit residuals. The low-SNR effects are estimated by directly using the residual values. The ensemble averaging method is used to characterize the average properties of eclipse transit signal shapes across all transit events. Each shape is assigned to one arc of 3 hours (default). This can be modified by enabling averagingInterval .', 'config_table': 'inputfileGraceResiduals filename SST Residuals timeMargin uint epochs before instrumental events waveLength uint length of the sample wave estimateEclipseTransitScale sequence outputfileScaleModel filename inputfileGrace1EclipseFactor filename GRACE-A eclipse factors computed with integrated orbit inputfileGrace2EclipseFactor filename GRACE-B eclipse factors computed with integrated orbit averagingInterval sequence nearestNeighborNumber uint estimateLowSnrScale sequence outputfileScaleModel filename inputfileGraceSstSNR filename GRACE SNR values', 'display_text': 'This program estimates satellite-to-satellite-tracking (SST) deterministic signals due to eclipse transits and low-SNR values from post-fit residuals. The low-SNR effects are estimated by directly using the residual values. The ensemble averaging method is used to characterize the average properties of eclipse transit signal shapes across all transit events. Each shape is assigned to one arc of 3 hours (default). This can be modified by enabling averagingInterval.'},
'GraceSstSpecialEvents': { 'name': 'GraceSstSpecialEvents', 'key': 'GraceSstSpecialEvents', 'description': 'Time-indexing deterministic signals in the GRACE K-Band measurements caused by Sun intrusions into the star camera baffles of GRACE-A and eclipse transits of the satellites. The events are determined by satellites\' position ( inputfileOrbit1/2 ) and orientation ( inputfileStarCamera1/2 ). Each type of event is represented by its mid-interval point per orbit revolution and is reported in outputfileEvents . The waveform of each event is nearly constant within one month and can be approximated by a polynomial. For the purpose of gravity field recovery, each waveform is parameterized by a polynomial and the coefficients of this polynomial are estimated as additional instrument calibration parameters in a common adjustment with all other instrument, satellite, and gravity field parameters, see 
parametrizationSatelliteTracking:specialEffect .', 'config_table': 'outputfileEvents filename outputfileIntervals filename inputfileOrbit1 filename inputfileOrbit2 filename inputfileStarCamera1 filename inputfileStarCamera2 filename ephemerides ephemeridesType eclipse eclipseType marginLeft double margin size (on both sides) [seconds] marginRight double margin size (on both sides) [seconds]', 'display_text': 'Time-indexing deterministic signals in the GRACE K-Band measurements caused by Sun intrusions into the star camera baffles of GRACE-A and eclipse transits of the satellites. The events are determined by satellites\' position (inputfileOrbit1/2) and orientation (inputfileStarCamera1/2). Each type of event is represented by its mid-interval point per orbit revolution and is reported in outputfileEvents.
The waveform of each event is nearly constant within one month and can be approximated by a polynomial. For the purpose of gravity field recovery, each waveform is parameterized by a polynomial and the coefficients of this polynomial are estimated as additional instrument calibration parameters in a common adjustment with all other instrument, satellite, and gravity field parameters, see parametrizationSatelliteTracking:specialEffect.'},
'GraceThrusterResponse2Accelerometer': { 'name': 'GraceThrusterResponse2Accelerometer', 'key': 'GraceThrusterResponse2Accelerometer', 'description': 'Add modeled thruster responses to accelerometer data. The epochs and durations are given in the inputfileThruster (THRUSTER). The inputfileThrusterResponse is a matrix with the linear accelerations in the SRF (x, y, z) in one line per pair: Negative Yaw, Positive Pitch, Positive Yaw, Negative Pitch, Negative Roll, Positive Roll.', 'config_table': 'outputfileAccelerometer filename ACCELEROMETER inputfileAccelerometer filename ACCELEROMETER inputfileThruster filename THRUSTER inputfileThrusterResponse filename thruster model (matrix with one line per pair)', 'display_text': 'Add modeled thruster responses to accelerometer data. The epochs and durations are given in the inputfileThruster (THRUSTER).
The inputfileThrusterResponse is a $(6\\times 3)$ matrix with the linear accelerations in the SRF ($x, y, z$) in one line per pair:
Negative Yaw,
Positive Pitch,
Positive Yaw,
Negative Pitch,
Negative Roll,
Positive Roll.
'},
'InstrumentSatelliteTrackingAntennaCenterCorrection': { 'name': 'InstrumentSatelliteTrackingAntennaCenterCorrection', 'key': 'InstrumentSatelliteTrackingAntennaCenterCorrection', 'description': 'This program computes the correction due to offset of the antenna center relative the center of mass. The offsets in inputfileAntennaCenters are given in the satellite reference frame. These offsets are rotated into the inertial frame with the rotations from inputfileStarCamera and projected onto the line of sight (LOS) with the unit vector in line of sight direction. The corrections for the range-rates and range-acceleration are computed by differentiating an interpolation polynomial of degree interpolationDegree .', 'config_table': 'outputfileSatelliteTracking filename corrections for range, range-rate, and range-accelerations inputfileOrbit1 filename inputfileOrbit2 filename inputfileStarCamera1 filename inputfileStarCamera2 filename antennaCenters choice KBR antenna phase center value sequence center1X double x-coordinate of antenna position in SRF [m] for GRACEA center1Y double y-coordinate of antenna position in SRF [m] for GRACEA center1Z double z-coordinate of antenna position in SRF [m] for GRACEA center2X double x-coordinate of antenna position in SRF [m] for GRACEB center2Y double y-coordinate of antenna position in SRF [m] for GRACEB center2Z double z-coordinate of antenna position in SRF [m] for GRACEB file sequence inputAntennaCenters filename interpolationDegree uint differentiation by polynomial approximation of degree n', 'display_text': 'This program computes the correction due to offset of the antenna center relative the center of mass. The offsets $\\M c_A$ and $\\M c_B$ in inputfileAntennaCenters are given in the satellite reference frame. 
These offsets are rotated into the inertial frame with $\\M D_A$ and $\\M D_B$ from inputfileStarCamera and projected onto the line of sight (LOS) \\[ \\rho_{AOC} = \\M e_{AB}\\cdot(\\M D_A\\,\\M c_A - \\M D_B\\,\\M c_B), \\]with the unit vector in line of sight direction \\[ \\M e_{AB} = \\frac{\\M r_B - \\M r_A}{\\left\\lVert{\\M r_B - \\M r_A}\\right\\rVert}. \\]The corrections for the range-rates and range-acceleration are computed by differentiating an interpolation polynomial of degree interpolationDegree.'},
'InstrumentStarCameraAngularAccelerometerFusion': { 'name': 'InstrumentStarCameraAngularAccelerometerFusion', 'key': 'InstrumentStarCameraAngularAccelerometerFusion', 'description': 'This program estimates the satellites orientation from star camera data inputfileStarCamera and angular accelerometer data inputfileAngularAcc . The combination of both observation types is achieved in a least square adjustment. The optimal weighting between the two different observation groups is achieved by means of VCE in combination with a robust estimator. The system of linearized observation equations within the sensor fusion approach can be formulated as a combined least squares system. The reference values are derived from inputfileStarCameraReference . In the course of the estimation, the accelerometer data is calibrated, by setting a bias factor with accBias .', 'config_table': 'outputfileStarCamera filename combined quaternions outputfileCovariance filename epoch-wise covariance matrix outputfileCovarianceMatrix filename full arc-wise covariance matrix per arc. 
arc number is appended to filename outputfileEpochSigmaStarCamera filename from vce and outlier detection outputfileEpochSigmaAccelerometer filename from vce and outlier detection outputfileAngularAcc filename angular acceleration observations (bias removed) outputfileSolution filename estimated parameter (one column for each arc) inputfileStarCameraReference filename quaternions as taylor point inputfileStarCamera filename star camera observations inputfileStarCameraCovariance filename star camera observations inputfileAngularAcc filename angular acceleration observations correctAccNonQuadratic boolean apply correction (non-square proof mass) accBias parametrizationTemporalType accelerometer bias per interval and axis accScale parametrizationTemporalType accelerometer scale per interval and axis sigmaStarcamera double [rad] sigmaAccelerometerX double [rad/s^2] sigmaAccelerometerY double [rad/s^2] sigmaAccelerometerZ double [rad/s^2] estimateSigmaScaPerAxis boolean separate variance factor for roll, pitch, yaw, instead of one common factor. estimateSigmaAccPerAxis boolean separate variance factor for each accelerometer axis, instead of one common factor. huber double residuals > huber*sigma0 are downweighted huberPower double residuals > huber: sigma=(e/huber)^power*sigma0 interpolationDegree uint iterationCount uint non linear equation solved iteratively', 'display_text': 'This program estimates the satellites orientation from star camera data inputfileStarCamera and angular accelerometer data inputfileAngularAcc. The combination of both observation types is achieved in a least square adjustment. The optimal weighting between the two different observation groups is achieved by means of VCE in combination with a robust estimator. 
The system of linearized observation equations within the sensor fusion approach can be formulated as: \\[ \\begin{bmatrix} \\M l_{ACC1B}\\\\ \\M l_{SCA1B} \\end{bmatrix} = \\begin{bmatrix} \\M A_{ACC1B} & \\M B_{ACC1B}\\\\ \\M A_{SCA1B} & \\M 0 \\end{bmatrix} \\begin{bmatrix} \\M q\\\\ \\M b \\end{bmatrix} = \\begin{bmatrix} \\frac{\\partial \\dot{\\boldsymbol{\\omega}}}{\\partial \\M q} & \\frac{\\partial \\dot{\\boldsymbol{\\omega}}}{\\partial \\M b}\\\\ \\M I & \\M 0 \\end{bmatrix} \\begin{bmatrix} \\M q\\\\ \\M b \\end{bmatrix} \\]with \\[\\begin{split} \\M l_{ACC1B} &= \\dot{\\boldsymbol{\\omega}}_{ACC1B} - \\dot{\\boldsymbol{\\omega}}_{0}, \\\\ \\M l_{SCA1B} &= \\M q_{SCA1B} - \\M q_{0}, \\\\ \\M q_{Fusion} &= \\M q + \\M q_{0}. \\end{split}\\]The reference values $\\M q_{0}$ and $\\dot{\\boldsymbol{\\omega}}_{0}$ are derived from inputfileStarCameraReference. In the course of the estimation, the accelerometer data is calibrated, by setting a bias factor $\\M b$ with accBias.'},
'Gravityfield2AbsoluteGravity': { 'name': 'Gravityfield2AbsoluteGravity', 'key': 'Gravityfield2AbsoluteGravity', 'description': 'This program computes the absolute value of gravity of a gravityfield on a given grid . The result is multiplied with factor . To get the full gravity vector in a terrestrial frame add the centrifugal part, see gravityfield:tides:centrifugal . The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening . It is intended to compute gravity anomalies from absolute gravity observations. To visualize the results use PlotMap .', 'config_table': 'outputfileGriddedData filename grid gridType gravityfield gravityfieldType factor double the result is multiplied by this factor, set -1 to subtract the field time time at this time the gravity field will be evaluated R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program computes the absolute value of gravity $\\left\\lVert{\\M g}\\right\\rVert$ of a gravityfield on a given grid. The result is multiplied with factor. To get the full gravity vector in a terrestrial frame add the centrifugal part, see gravityfield:tides:centrifugal.
The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.
It is intended to compute gravity anomalies from absolute gravity observations. To visualize the results use PlotMap.'},
'Gravityfield2AreaMeanTimeSeries': { 'name': 'Gravityfield2AreaMeanTimeSeries', 'key': 'Gravityfield2AreaMeanTimeSeries', 'description': 'This program computes a time series of time variable gravityfield functionals averaged over a given area, e.g. equivalent water heights in the amazon basin. The type of functional (e.g. gravity anomalies or geoid heights) can be chosen with kernel . The average is performed at each time step by a weighted average over all grid points where the weight is the associated area at each point. If removeMean is set the temporal mean is removed from the time series. To speed up the computation the gravity field can be converted to spherical harmonics before the computation with convertToHarmonics . Additionally the root mean square of the values in the area at each time step is computed if computeRms is set. Additionally the accuracy of the value at each time step can be computed if computeSigma is set. The outputfileTimeSeries is an instrument file with one, two, or three data columns. First data column contains the computed functionals and the following columns contain the RMS and the accuracies (optionally). 
To visualize the results use PlotGraph .', 'config_table': 'outputfileTimeSeries filename grid gridType timeSeries timeSeriesType kernel kernelType gravityfield gravityfieldType convertToHarmonics boolean gravityfield is converted to spherical harmonics before evaluation, may accelerate the computation multiplyWithArea boolean multiply time series with total area (useful for mass estimates) removeMean boolean remove the temporal mean of the series computeRms boolean additional rms each time step computeSigma boolean additional error bars at each time step', 'display_text': 'This program computes a time series of time variable gravityfield functionals averaged over a given area, e.g. equivalent water heights in the amazon basin. The type of functional (e.g. gravity anomalies or geoid heights) can be chosen with kernel. The average is performed at each time step by a weighted average over all grid points where the weight is the associated area at each point. If removeMean is set the temporal mean is removed from the time series. To speed up the computation the gravity field can be converted to spherical harmonics before the computation with convertToHarmonics.
Additionally the root mean square of the values in the area at each time step is computed if computeRms is set.
Additionally the accuracy of the value at each time step can be computed if computeSigma is set.
The outputfileTimeSeries is an instrument file with one, two, or three data columns. First data column contains the computed functionals and the following columns contain the RMS and the accuracies (optionally).
'},
'Gravityfield2Deflections': { 'name': 'Gravityfield2Deflections', 'key': 'Gravityfield2Deflections', 'description': 'This program computes the deflections of the vertical in north direction and in east direction in radian, where the gravity vector is taken from gravityfield in the local ellipsoidal system (north, east, up) and divided by the normal gravity at that point. The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening .', 'config_table': 'outputfileGriddedData filename xi (north), eta (east) [rad] grid gridType gravityfield gravityfieldType time time at this time the gravity field will be evaluated R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program computes the deflections of the vertical $\\xi$ in north direction and $\\eta$ in east direction in radian according to \\[ \\xi = g_x/\\gamma \\qquad\\text{and}\\qquad \\eta=g_y/\\gamma, \\]where $\\M g=\\nabla V$ is the gravity vector from gravityfield in the local ellipsoidal system (north, east, up) and $\\gamma$ is the normal gravity at that point.
The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.'},
'Gravityfield2DegreeAmplitudes': { 'name': 'Gravityfield2DegreeAmplitudes', 'key': 'Gravityfield2DegreeAmplitudes', 'description': 'This program computes degree amplitudes from a gravityfield and saves them to a matrix file with three columns: the degree, the degree amplitude, and the formal errors. The coefficients can be converted to different functionals with kernel . The gravity field can be evaluated at different altitudes by specifying evaluationRadius . Polar regions can be excluded by setting polarGap . If set the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM . See also PotentialCoefficients2DegreeAmplitudes .', 'config_table': 'outputfileMatrix filename three column matrix with degree, signal amplitude, formal error gravityfield gravityfieldType kernel kernelType type choice type of variances rms degree amplitudes (square root of degree variances) accumulation cumulate variances over degrees median median of absolute values per degree time time at this time the gravity field will be evaluated evaluationRadius double evaluate the gravity field at this radius (default: evaluate at surface polarGap angle exclude polar regions (aperture angle in degrees) minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'This program computes degree amplitudes from a gravityfield and saves them to a matrix file with three columns: the degree, the degree amplitude, and the formal errors.
The coefficients can be converted to different functionals with kernel. The gravity field can be evaluated at different altitudes by specifying evaluationRadius. Polar regions can be excluded by setting polarGap. If set the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
See also PotentialCoefficients2DegreeAmplitudes.'},
'Gravityfield2DegreeAmplitudesPlotGrid': { 'name': 'Gravityfield2DegreeAmplitudesPlotGrid', 'key': 'Gravityfield2DegreeAmplitudesPlotGrid', 'description': 'This program computes a timeSeries of a time variable gravityfield and saves it as degree amplitudes. The expansion is limited in the range between minDegree and maxDegree inclusively. The outputfileTimeSeries is a matrix with every row containing the time, degree, degree amplitude, and the formal error. To visualize the results use PlotGraph . See also Gravityfield2DegreeAmplitudes .', 'config_table': 'outputfileTimeSeries filename each row: mjd, degree, amplitude, formal error gravityfield gravityfieldType kernel kernelType timeSeries timeSeriesType evaluationRadius double evaluate the gravity field at this radius (default: evaluate at surface polarGap angle exclude polar regions (aperture angle in degrees) minDegree uint minimal degree maxDegree uint maximal degree GM double Geocentric gravitational constant R double reference radius', 'display_text': 'This program computes a timeSeries of a time variable gravityfield and saves it as degree amplitudes. The expansion is limited in the range between minDegree and maxDegree inclusively \\[ \\sigma_n = \\frac{GM}{R}\\left(\\frac{R}{r}\\right)^{n+1}k_n\\sqrt{\\sum_{m=0}^n c_{nm}^2+s_{nm}^2}. \\] The outputfileTimeSeries is a matrix with every row containing the time, degree, degree amplitude, and the formal error.
'},
'Gravityfield2DisplacementTimeSeries': { 'name': 'Gravityfield2DisplacementTimeSeries', 'key': 'Gravityfield2DisplacementTimeSeries', 'description': 'This program computes a time series of displacements of a list of stations ( grid ) due to the effect of time variable loading masses. The displacement of a station is calculated from the load Love and Shida numbers given by inputfileDeformationLoadLoveNumber and the load Love numbers given by inputfilePotentialLoadLoveNumber , applied to the spherical harmonics expansion of the full time variable gravitational potential (potential of the loading mass + deformation potential). Deformations due to Earth tide and due to polar tides are computed using the IERS conventions; this equation is not used in these cases. The outputfileTimeSeries is an instrument file , MISCVALUES. The data columns contain the deformation of each station in a global terrestrial reference frame or alternatively in a local ellipsoidal frame (north, east, up) if localReferenceFrame is set.', 'config_table': 'outputfileTimeSeries filename x,y,z [m] per station grid gridType station list timeSeries timeSeriesType gravityfield gravityfieldType tides tidesType earthRotation earthRotationType ephemerides ephemeridesType inputfileDeformationLoadLoveNumber filename inputfilePotentialLoadLoveNumber filename if full potential is given and not only loading potential removeMean boolean remove the temporal mean of each coordinate localReferenceFrame boolean local left handed reference frame (north, east, up)', 'display_text': 'This program computes a time series of displacements of a list of stations (grid) due to the effect of time variable loading masses. 
The displacement $\\M u$ of a station is calculated according to \\[\\label{eq:displacement} \\M u(\\M r) = \\frac{1}{\\gamma}\\sum_{n=0}^\\infty \\left[\\frac{h_n}{1+k_n}V_n(\\M r)\\,\\M e_{up} + R\\frac{l_n}{1+k_n}\\left( \\frac{\\partial V_n(\\M r)}{\\partial \\M e_{north}}\\M e_{north} +\\frac{\\partial V_n(\\M r)}{\\partial \\M e_{east}} \\M e_{east}\\right)\\right], \\]where $\\gamma$ is the normal gravity, the load Love and Shida numbers $h_n,l_n$ are given by inputfileDeformationLoadLoveNumber and the load Love numbers $k_n$ are given by inputfilePotentialLoadLoveNumber. The $V_n$ are the spherical harmonics expansion of the full time variable gravitational potential (potential of the loading mass + deformation potential): \\[ V(\\M r) = \\sum_{n=0}^\\infty V_n(\\M r). \\]Deformations due to Earth tide and due to polar tides are computed using the IERS conventions. Eq. \\eqref{eq:displacement} is not used in these cases.
The outputfileTimeSeries is an instrument file, MISCVALUES. The data columns contain the deformation of each station in $x,y,z$ in a global terrestrial reference frame or alternatively in a local ellipsoidal frame (north, east, up) if localReferenceFrame is set.'},
'Gravityfield2EmpiricalCovariance': { 'name': 'Gravityfield2EmpiricalCovariance', 'key': 'Gravityfield2EmpiricalCovariance', 'description': 'This program estimates an spatial and temporal covariance matrix from a time series of gravity fields. Firstly for every time step a spherical harmonics vector from the time variable gravity field is generated. The coefficients of the spherical harmonics expansion are in the sequence given by This program estimates an spatial and temporal covariance matrix from a time series of gravity fields. . If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM . In the next step the empirical covariance matrix is estimated according to where is given by differenceStep . From the diagonal elements of the isotropic accuracies are computed and a diagonal matrix is constructed . The result is computed:', 'config_table': 'outputfileCovarianceMatrix filename outputfilePotentialCoefficients filename gravityfield gravityfieldType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme for solution vector removeMean boolean timeSeries timeSeriesType sampling of the gravityfield differenceStep uint choose dt for: x,i(t) - x,j(t+dt) factorFullMatrixPart double factorIsotropicPart double intervals timeSeriesType', 'display_text': 'This program estimates an spatial and temporal covariance matrix from a time series of gravity fields.
Firstly for every time step $t_i$ a spherical harmonics vector $\\M x_i$ from the time variable gravity field is generated. The coefficients of the spherical harmonics expansion are in the sequence given by numbering. If set the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
In the next step the empirical covariance matrix is estimated according to \\[ \\M\\Sigma(\\Delta i)_{full} = \\frac{1}{N}\\sum_{i=1}^N \\M x_i \\M x_{i+\\Delta i}^T, \\]where $\\Delta i$ is given by differenceStep.
From the diagonal elements of $\\M\\Sigma(\\Delta i)$ the isotropic accuracies are computed \\[ \\sigma_n^2 = \\frac{1}{2n+1}\\sum_{m=0}^n \\sigma_{cnm}^2+\\sigma_{snm}^2, \\]and a diagonal matrix is constructed $\\Sigma_{iso} = \\text{diag}(\\sigma_2^2,\\ldots,\\sigma_N^2)$. The result is computed: \\[ \\M\\Sigma(\\Delta i) = \\alpha_{full}\\M\\Sigma(\\Delta i)_{full}+\\alpha_{iso}\\M\\Sigma(\\Delta i)_{iso}. \\]'},
'Gravityfield2Gradients': { 'name': 'Gravityfield2Gradients', 'key': 'Gravityfield2Gradients', 'description': 'This program computes gravity gradients from This program computes gravity gradients from on a This program computes gravity gradients from in a global terrestrial reference frame or alternatively in a local elliposidal frame (north, east, up) if localReferenceFrame is set. In This program computes gravity gradients from the values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening .', 'config_table': 'outputfileGriddedData filename Vxx Vyy Vzz Vxy Vxz Vyz grid gridType gravityfield gravityfieldType localReferenceFrame boolean local left handed reference frame (north, east, up) time time at this time the gravity field will be evaluated R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program computes gravity gradients from gravityfield on a grid in a global terrestrial reference frame or alternatively in a local elliposidal frame (north, east, up) if localReferenceFrame is set. In outputfileGriddedData the values $[Vxx, Vyy, Vzz, Vxy, Vxz, Vyz]$ will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.'},
'Gravityfield2GravityVector': { 'name': 'Gravityfield2GravityVector', 'key': 'Gravityfield2GravityVector', 'description': 'This program computes gravity vectors from This program computes gravity vectors from on a This program computes gravity vectors from in a global terrestrial reference frame or alternatively in a local elliposidal frame (north, east, up) if localReferenceFrame is set. In This program computes gravity vectors from the values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening .', 'config_table': 'outputfileGriddedData filename gx, gy, gz grid gridType gravityfield gravityfieldType localReferenceFrame boolean local left handed reference frame (north, east, up) time time at this time the gravity field will be evaluated R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program computes gravity vectors from gravityfield on a grid in a global terrestrial reference frame or alternatively in a local elliposidal frame (north, east, up) if localReferenceFrame is set. In outputfileGriddedData the values $[gx, gy, gz]$ will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.'},
'Gravityfield2GridCovarianceMatrix': { 'name': 'Gravityfield2GridCovarianceMatrix', 'key': 'Gravityfield2GridCovarianceMatrix', 'description': 'This program propagates the covariance matrix of a This program propagates the covariance matrix of a evaluated at time to a This program propagates the covariance matrix of a . The full variance-covariance matrix is computed and written to a matrix file : The This program propagates the covariance matrix of a determines the quantity of the grid values, for example, This program propagates the covariance matrix of a . See also GravityfieldCovariancesPropagation2GriddedData , GravityfieldVariancesPropagation2GriddedData .', 'config_table': 'outputfileMatrix filename symmetric grid covariance matrix grid gridType kernel kernelType gravityfield gravityfieldType time time at this time the gravity field will be evaluated', 'display_text': 'This program propagates the covariance matrix of a gravityfield evaluated at time to a grid. The full variance-covariance matrix is computed and written to a matrix file: \\[ \\mathbf{\\Sigma}_\\mathbf{y} = \\mathbf{F}\\mathbf{\\Sigma}_\\mathbf{x}\\mathbf{F}^T \\]The kernel determines the quantity of the grid values, for example, kernel:waterHeight.
See also GravityfieldCovariancesPropagation2GriddedData, GravityfieldVariancesPropagation2GriddedData.'},
'Gravityfield2GriddedData': { 'name': 'Gravityfield2GriddedData', 'key': 'Gravityfield2GriddedData', 'description': 'This program computes values of a This program computes values of a on a given This program computes values of a . The type of value (e.g gravity anomalies or geoid heights) can be choosen with This program computes values of a . If a time is given the gravity field will be evaluated at this point of time otherwise only the static part will be used. The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening . To speed up the computation the gravity field can be converted to spherical harmonics before the computation with convertToHarmonics . To visualize the results use PlotMap .', 'config_table': 'outputfileGriddedData filename grid gridType kernel kernelType gravityfield gravityfieldType convertToHarmonics boolean gravityfield is converted to spherical harmonics before evaluation, may accelerate the computation time time at this time the gravity field will be evaluated R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program computes values of a gravityfield on a given grid. The type of value (e.g gravity anomalies or geoid heights) can be choosen with kernel. If a time is given the gravity field will be evaluated at this point of time otherwise only the static part will be used. The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening. To speed up the computation the gravity field can be converted to spherical harmonics before the computation with convertToHarmonics.
To visualize the results use PlotMap.'},
'Gravityfield2GriddedDataTimeSeries': { 'name': 'Gravityfield2GriddedDataTimeSeries', 'key': 'Gravityfield2GriddedDataTimeSeries', 'description': 'This program computes values of a This program computes values of a on a given This program computes values of a for each time step of This program computes values of a . The type of value (e.g gravity anomalies or geoid heights) can be choosen with This program computes values of a . To speed up the computation the gravity field can be converted to spherical harmonics before the computation with convertToHarmonics . The This program computes values of a is an instrument (MISCVALUES) file with a data column for each grid point per epoch. This program enables the use of all instrument programs like InstrumentFilter , InstrumentArcStatistics or InstrumentDetrend to analyze and manipulate time series of gridded data. See also TimeSeries2GriddedData , Gravityfield2GriddedData', 'config_table': 'outputfileTimeSeries filename each epoch: data of grid points (MISCVALUES) grid gridType kernel kernelType gravityfield gravityfieldType convertToHarmonics boolean gravityfield is converted to spherical harmonics before evaluation, may accelerate the computation timeSeries timeSeriesType', 'display_text': 'This program computes values of a gravityfield on a given grid for each time step of timeSeries. The type of value (e.g gravity anomalies or geoid heights) can be choosen with kernel. To speed up the computation the gravity field can be converted to spherical harmonics before the computation with convertToHarmonics. The outputfileTimeSeries is an instrument (MISCVALUES) file with a data column for each grid point per epoch.
See also TimeSeries2GriddedData, Gravityfield2GriddedData'},
'Gravityfield2PotentialCoefficients': { 'name': 'Gravityfield2PotentialCoefficients', 'key': 'Gravityfield2PotentialCoefficients', 'description': 'This program evaluates a time variable This program evaluates a time variable at a given time and saves it as a spherical harmonics file . If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM .', 'config_table': 'outputfilePotentialCoefficients filename gravityfield gravityfieldType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius time time at this time the gravity field will be evaluated', 'display_text': 'This program evaluates a time variable gravityfield at a given time and saves it as a spherical harmonics file. If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM.'},
'Gravityfield2PotentialCoefficientsTimeSeries': { 'name': 'Gravityfield2PotentialCoefficientsTimeSeries', 'key': 'Gravityfield2PotentialCoefficientsTimeSeries', 'description': 'This program computes a This program computes a of a time variable This program computes a and converts to coefficients of a spherical harmonics expansion. The expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM . The This program computes a contains the potential coefficients as data columns for each epoch in the sequence given by This program computes a .', 'config_table': 'outputfileTimeSeries filename instrument file (MISCVALUES) gravityfield gravityfieldType timeSeries timeSeriesType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme', 'display_text': 'This program computes a timeSeries of a time variable gravityfield and converts to coefficients of a spherical harmonics expansion. The expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
The outputfileTimeSeries contains the potential coefficients as data columns for each epoch in the sequence given by numbering.'},
'Gravityfield2SphericalHarmonicsVector': { 'name': 'Gravityfield2SphericalHarmonicsVector', 'key': 'Gravityfield2SphericalHarmonicsVector', 'description': 'This program evaluates a time variable This program evaluates a time variable at a given time and saves a vector with the coefficients of a spherical harmonics expansion in the sequence given by This program evaluates a time variable . If set the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM . This coefficients vector can be used as a approximate solution, see NormalsMultiplyAdd , or as pseudo oberservations for regularization, see This program evaluates a time variable . For back transformation use Gravityfield2PotentialCoefficients with This program evaluates a time variable .', 'config_table': 'outputfileVector filename gravityfield gravityfieldType startIndex uint start index to put the coefficients in the solution vector minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme for solution vector time time at this time the gravity field will be evaluated useSigma boolean use formal errors instead of coefficients', 'display_text': 'This program evaluates a time variable gravityfield at a given time and saves a vector with the coefficients of a spherical harmonics expansion in the sequence given by numbering. If set the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
For back transformation use Gravityfield2PotentialCoefficients with gravityfield:fromParametrization.'},
'Gravityfield2TimeSplines': { 'name': 'Gravityfield2TimeSplines', 'key': 'Gravityfield2TimeSplines', 'description': 'This program estimates splines in time domain from a time variable gravity field and writes This program estimates splines in time domain from a time variable gravity field and writes . The This program estimates splines in time domain from a time variable gravity field and writes is sampled at This program estimates splines in time domain from a time variable gravity field and writes , converted to potential coefficients in the range between minDegree and maxDegree inclusively. The time series of spherical harmonics can be temporal filtered with This program estimates splines in time domain from a time variable gravity field and writes . In the next step temporal splines with splineDegree and nodal points given at This program estimates splines in time domain from a time variable gravity field and writes are adjusted to the time series in a least squares sense. This is very fast for block means (splineDegree = 0) but for other splines a large systems of equations must be solved. In the adjustment process the time series of gravity fields can be interpreted as samples at the given times or as continuous function with linear behaviour between sampled points ( linearInterpolation ). 
To combine a series of potential coefficients to a spline file with block means (splineDegree = 0) use the fast PotentialCoefficients2BlockMeanTimeSplines instead.', 'config_table': 'outputfileTimeSplines filename gravityfield gravityfieldType temporalFilter digitalFilterType filter sampled gravity field in time minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius sampling timeSeriesType gravity field is sampled at these times removeMean boolean remove the temporal mean of the series before estimating the splines linearInterpolation boolean assume linear behavior between sampled points splineDegree uint degree of splines splineTimeSeries timeSeriesType nodal points of splines in time domain', 'display_text': 'This program estimates splines in time domain from a time variable gravity field and writes outputfileTimeSplines. The gravityfield is sampled at sampling, converted to potential coefficients in the range between minDegree and maxDegree inclusively. The time series of spherical harmonics can be temporal filtered with temporalFilter.
In the next step temporal splines with splineDegree and nodal points given at splineTimeSeries are adjusted to the time series in a least squares sense. This is very fast for block means (splineDegree = 0) but for other splines a large system of equations must be solved. In the adjustment process the time series of gravity fields can be interpreted as samples at the given times or as a continuous function with linear behaviour between sampled points (linearInterpolation).
To combine a series of potential coefficients to a spline file with block means (splineDegree = 0) use the fast PotentialCoefficients2BlockMeanTimeSplines instead.'},
'Gravityfield2TrendPotentialCoefficients': { 'name': 'Gravityfield2TrendPotentialCoefficients', 'key': 'Gravityfield2TrendPotentialCoefficients', 'description': 'This program estimates This program estimates (e.g. mean, trend, annual) from a time variable gravity field. In a first step a time variable This program estimates is sampled at This program estimates and converted to coefficients of a spherical harmonics expansion. The expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM . These coefficients serves as observations of a robust least squares adjustment to estimate This program estimates parameters. For each temporal parameter an This program estimates is generated.', 'config_table': 'outputfilePotentialCoefficients filename for each temporal parameter gravityfield gravityfieldType timeSeries timeSeriesType parametrizationTemporal parametrizationTemporalType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius huber double for robust least squares huberPower double for robust least squares huberMaxIteration uint (maximum) number of iterations for robust estimation', 'display_text': 'This program estimates parametrizationTemporal (e.g. mean, trend, annual) from a time variable gravity field.
In a first step a time variable gravityfield is sampled at timeSeries and converted to coefficients of a spherical harmonics expansion. The expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
These coefficients serve as observations in a robust least squares adjustment to estimate parametrizationTemporal parameters. For each temporal parameter an outputfilePotentialCoefficients is generated.'},
'GravityfieldCovariancesPropagation2GriddedData': { 'name': 'GravityfieldCovariancesPropagation2GriddedData', 'key': 'GravityfieldCovariancesPropagation2GriddedData', 'description': 'This program computes the covariance between a source point given by longitude/latitude ( L , B ) and the points of a This program computes the covariance between a source point given by longitude/latitude ( in terms of the functional given by This program computes the covariance between a source point given by longitude/latitude ( from the variance-covariance matrix of a This program computes the covariance between a source point given by longitude/latitude ( evaluated at time . If computeCorrelation is set, the program returns the correlation according to in the range of [-1, 1] instead of the covariance. See also Gravityfield2GridCovarianceMatrix , GravityfieldVariancesPropagation2GriddedData .', 'config_table': 'outputfileGriddedData filename gridded data file containing the covariance betwenn source point and grid points grid gridType kernel kernelType functional gravityfield gravityfieldType time time at this time the gravity field will be evaluated L angle longitude of variance point B angle latitude of variance point height double ellipsoidal height of source point computeCorrelation boolean compute correlations instead of covariances R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program computes the covariance between a source point given by longitude/latitude (L, B) and the points of a grid in terms of the functional given by kernel from the variance-covariance matrix of a gravityfield evaluated at time.
If computeCorrelation is set, the program returns the correlation according to \\[ r_{ij} = \\frac{\\sigma_{ij}}{\\sigma_i \\sigma_j} \\]in the range of [-1, 1] instead of the covariance.
See also Gravityfield2GridCovarianceMatrix, GravityfieldVariancesPropagation2GriddedData.'},
'GravityfieldReplacePotentialCoefficients': { 'name': 'GravityfieldReplacePotentialCoefficients', 'key': 'GravityfieldReplacePotentialCoefficients', 'description': 'Replaces single potential coefficients in a gravity field. Both Replaces single potential coefficients in a gravity field. Both and Replaces single potential coefficients in a gravity field. Both are evaluated at time and converted to spherical harmonic coefficients. Single coefficients are then replaced in Replaces single potential coefficients in a gravity field. Both by the values from Replaces single potential coefficients in a gravity field. Both and the result is written to Replaces single potential coefficients in a gravity field. Both from minDegree to maxDegree ,', 'config_table': 'outputfilePotentialCoefficients filename gravityfield gravityfieldType single coefficients are replaced by the other gravityfield gravityfieldReplacement gravityfieldType contains the coefficients for replacement coefficients choice cnm sequence degree uint order uint snm sequence degree uint order uint minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius time time at this time the gravity field will be evaluated', 'display_text': 'Replaces single potential coefficients in a gravity field. Both gravityfield and gravityfieldReplacement are evaluated at time and converted to spherical harmonic coefficients. Single coefficients are then replaced in gravityfield by the values from gravityfieldReplacement and the result is written to outputfilePotentialCoefficients from minDegree to maxDegree,'},
'GravityfieldVariancesPropagation2GriddedData': { 'name': 'GravityfieldVariancesPropagation2GriddedData', 'key': 'GravityfieldVariancesPropagation2GriddedData', 'description': 'This program propagates variance-covariance matrix of a This program propagates variance-covariance matrix of a evaluated at time to the points of a This program propagates variance-covariance matrix of a in terms of the functional given by This program propagates variance-covariance matrix of a . The resulting outputfileGriddedData contains the standard deviations of the grid points. See also Gravityfield2GridCovarianceMatrix , GravityfieldCovariancesPropagation2GriddedData .', 'config_table': 'outputfileGriddedData filename standard deviation at each grid point grid gridType kernel kernelType functional gravityfield gravityfieldType time time at this time the gravity field will be evaluated R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program propagates variance-covariance matrix of a gravityfield evaluated at time to the points of a grid in terms of the functional given by kernel. The resulting outputfileGriddedData contains the standard deviations of the grid points.
See also Gravityfield2GridCovarianceMatrix, GravityfieldCovariancesPropagation2GriddedData.'},
'GriddedData2AreaMeanTimeSeries': { 'name': 'GriddedData2AreaMeanTimeSeries', 'key': 'GriddedData2AreaMeanTimeSeries', 'description': 'This program computes a time series of area mean values in a basin represented by This program computes a time series of area mean values in a basin represented by from a sequence of grid files. If a file is not found, the epoch is skipped. Per default the weighted average of all points in the given border is computed where the points are weighted by their area element. If computeMean is set, the time average of each grid points is subtracted before the computation. If multiplyWithArea is set, the weighted average is multiplied with the total basin area. This is useful for computing the total mass in the basin. The This program computes a time series of area mean values in a basin represented by is an instrument file, where the first columns are the mean value each data column in the grid files, followed by the the weighted RMS for each data column in the grid files if computeRms is set. If the number of data columns differs between the grid files, the remaining columns are padded with zeros. See also Gravityfield2AreaMeanTimeSeries .', 'config_table': 'outputfileTimeSeries filename inputfileGriddedData filename border borderType timeSeries timeSeriesType multiplyWithArea boolean multiply time series with total area (useful for mass estimates) removeMean boolean remove the temporal mean of the series computeRms boolean additional rms each time step', 'display_text': 'This program computes a time series of area mean values in a basin represented by border from a sequence of grid files. If a file is not found, the epoch is skipped. Per default the weighted average of all points in the given border is computed where the points are weighted by their area element.
If computeMean is set, the time average of each grid points is subtracted before the computation. If multiplyWithArea is set, the weighted average is multiplied with the total basin area. This is useful for computing the total mass in the basin.
The outputfileTimeSeries is an instrument file, where the first columns are the mean value of each data column in the grid files, followed by the weighted RMS for each data column in the grid files if computeRms is set. If the number of data columns differs between the grid files, the remaining columns are padded with zeros.
See also Gravityfield2AreaMeanTimeSeries.'},
'GriddedData2GriddedDataStatistics': { 'name': 'GriddedData2GriddedDataStatistics', 'key': 'GriddedData2GriddedDataStatistics', 'description': 'This program assigns values This program assigns values to the nearest points of a new This program assigns values . If some of the new points are not filled in with data emptyValue is used instead. If multiple points of the input fall on the same node the result can be selected with statistics (e.g. mean, root mean square, min, max, ). It also is possible to simply count the number of data points that were assigned to each point. Be aware in case borders are given within This program assigns values , the This program assigns values will have points excluded before the assignement of old points to the new points. The data from This program assigns values will not be limited by the given borders! See GriddedDataConcatenate to limit the This program assigns values to given borders.', 'config_table': 'outputfileGriddedData filename inputfileGriddedData filename grid gridType statistic choice statistic used if multiple values fall on the same cell mean mean wmean area weighted mean rms root mean square wrms area weighted root mean square std standard deviation wstd area weighted standard deviation sum sum min minimum value max maximum value count number of values first first value last last value emptyValue double value for nodes without data R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'This program assigns values inputfileGriddedData to the nearest points of a new grid. If some of the new points are not filled in with data emptyValue is used instead. If multiple points of the input fall on the same node the result can be selected with statistics (e.g. mean, root mean square, min, max, ). It also is possible to simply count the number of data points that were assigned to each point.
'},
'GriddedData2GriddedDataTimeSeries': { 'name': 'GriddedData2GriddedDataTimeSeries', 'key': 'GriddedData2GriddedDataTimeSeries', 'description': 'Write a series of Write a series of with the corresponding Write a series of as a single gridded data time series file . The splineDegree defines the possible temporal interpolation of data in the output file. For a file with spline degree 0 (temporal block means) the time intervals in which the grids are valid are defined between adjacent points in time. Therefore one more point in time is needed than the number of input grid files for degree 0. See also GriddedDataTimeSeries2GriddedData .', 'config_table': 'outputfileGriddedDataTimeSeries filename inputfileGriddedData filename file count must agree with number of times+splineDegre-1 timeSeries timeSeriesType splineDegree uint degree of splines', 'display_text': 'Write a series of inputfileGriddedData with the corresponding timeSeries as a single gridded data time series file. The splineDegree defines the possible temporal interpolation of data in the output file. For a file with spline degree 0 (temporal block means) the time intervals in which the grids are valid are defined between adjacent points in time. Therefore one more point in time is needed than the number of input grid files for degree 0.
See also GriddedDataTimeSeries2GriddedData.'},
'GriddedData2Matrix': { 'name': 'GriddedData2Matrix', 'key': 'GriddedData2Matrix', 'description': 'This program converts This program converts to This program converts with data columns. The grid is expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening . The content of the output matrix can be controlled by outColumn expressions applied to every grid point. The common data variables for grids are available, see dataVariables .', 'config_table': 'outputfileMatrix filename point list as matrix with longitude and latitude values in columns and possible additional columns inputfileGriddedData filename R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates outColumn expression expression (variables: longitude, latitude, height, area, data0, data1, ...)', 'display_text': 'This program converts inputfileGriddedData to outputfileMatrix with data columns. The grid is expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening. The content of the output matrix can be controlled by outColumn expressions applied to every grid point. The common data variables for grids are available, see dataVariables.'},
'GriddedData2PotentialCoefficients': { 'name': 'GriddedData2PotentialCoefficients', 'key': 'GriddedData2PotentialCoefficients', 'description': 'This program estimate potential coefficients from inputfileGriddedData gravity field functionals. It used a simple quadrature formular or a leastSquares adjustment with block diagonal normal matrix (order by order). For the latter one the data must be regular distributed. The values and the weights are expressions using the common data variables for grids, see dataVariables. Multiple outputfilePotentialCoefficients can be estimated in one step. For each an indivdual value must be specified. The type of the gridded data (e.g gravity anomalies or geoid heights) must be set with kernel. The expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM. For irregular distributed data and using the full variance covariance matrix use NormalsSolverVCE. See also GriddedDataTimeSeries2PotentialCoefficients.', 'config_table': 'outputfilePotentialCoefficients filename one file for each value expression inputfileGriddedData filename value expression expression to compute values (input columns are named data0, data1, ...) weight expression expression to compute values (input columns are named data0, data1, ...) kernel kernelType data type of input values minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius leastSquares boolean false: quadrature formular, true: least squares adjustment order by order', 'display_text': 'This program estimate potential coefficients from inputfileGriddedData gravity field functionals. 
It used a simple quadrature formular \\[ c_{nm} = \\frac{1}{4\\pi}\\frac{R}{GM} \\sum_i f_i \\left(\\frac{r_i}{R}\\right)^{n+1} k_n C_{nm}(\\lambda_i,\\vartheta_i)\\,\\Delta\\Phi_i \\]or a leastSquares adjustment with block diagonal normal matrix (order by order). For the latter one the data must be regular distributed.
The values $f_i$ and the weights $\\Delta\\Phi_i$ are expressions using the common data variables for grids, see dataVariables. Multiple outputfilePotentialCoefficients can be estimated in one step. For each an indivdual value must be specified. The type of the gridded data (e.g gravity anomalies or geoid heights) must be set with kernel $k_n$.
The expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
See also GriddedDataTimeSeries2PotentialCoefficients.'},
'GriddedData2SphericalDistance': { 'name': 'GriddedData2SphericalDistance', 'key': 'GriddedData2SphericalDistance', 'description': 'Compute the spherical distance on the unit sphere in radians between all point pairs of two grids. The spherical distance is computed by where is the (normalized) position. This implies that all points are projected onto the unit sphere.', 'config_table': 'outputfileMatrix filename matrix containing the spherical distance between all point pairs [rad] grid1 gridType grid2 gridType', 'display_text': 'Compute the spherical distance on the unit sphere in radians between all point pairs of two grids. The spherical distance is computed by \\[ \\psi_{12} = \\arccos(\\M n_1 \\cdot \\M n_2), \\]where $\\M n_i$ is the (normalized) position. This implies that all points are projected onto the unit sphere.'},
'GriddedData2TimeSeries': { 'name': 'GriddedData2TimeSeries', 'key': 'GriddedData2TimeSeries', 'description': 'Write a series of inputfileGriddedData with the corresponding timeSeries as a single time series file (instrument, MISCVALUES). If groupDataByPoints is true the outputfileTimeSeries starts for each epoch with all data (data0, data1) for the first point, followed by all data of the second point and so on. If groupDataByPoints is false, the file starts with data0 for all points, followed by all data1 and so on. This enables the use of all instrument programs like InstrumentFilter or InstrumentDetrend to analyze and manipulate time series of gridded data. See also TimeSeries2GriddedData.', 'config_table': 'outputfileTimeSeries filename each epoch: multiple data for points (MISCVALUES) inputfileGriddedData filename file count must agree with number of times timeSeries timeSeriesType groupDataByPoints boolean multiple data are given point by point, otherwise: data0 for all points, followed by all data1', 'display_text': 'Write a series of inputfileGriddedData with the corresponding timeSeries as a single time series file (instrument, MISCVALUES).
If groupDataByPoints is true the outputfileTimeSeries starts for each epoch with all data (data0, data1) for the first point, followed by all data of the second point and so on. If groupDataByPoints is false, the file starts with data0 for all points, followed by all data1 and so on.
This enables the use of all instrument programs like InstrumentFilter or InstrumentDetrend to analyze and manipulate time series of gridded data.
See also TimeSeries2GriddedData.'},
'GriddedDataCalculate': { 'name': 'GriddedDataCalculate', 'key': 'GriddedDataCalculate', 'description': 'This program manipulates grid files with data in columns similar to FunctionsCalculate , see there for more details. If several inputfile s are given the data columns are copied side by side. All inputfile s must contain the same grid points. The columns are enumerated by data0 , data1 , . The content of outputfileGriddedData is controlled by outColumn . The algorithm to compute the output is as follows: The expressions in outColumn are evaluated once for each grid point of the input. The variables data0 , data1 , are replaced by the according values from the input columns before. Additional variables are available, e.g. index , data0rms , see dataVariables . For a simplified handling constant s can be defined by name=value . It is also possible to estimate parameter s in a least squares adjustment. The leastSquares serves as template for observation equations for every point. The expression leastSquares is evaluated for each grid point. The variables data0 , data1 , are replaced by the according values from the input columns before. In the next step the parameters are estimated in order to minimize the expressions in leastSquares in the sense of least squares. Afterwards grid points are removed if one of the removalCriteria expressions for this grid point evaluates true (not zero). An extra statistics:outputfile can be generated with one row of data. For the computation of the outColumn values all dataVariables are available (e.g. data3mean , data4std ) inclusively the constant s and estimated parameter s but without the data0 , data1 , itself. The variables and the numbering of the columns refers to the outputfileGriddedData. 
See also FunctionsCalculate , InstrumentArcCalculate , MatrixCalculate .', 'config_table': 'outputfileGriddedData filename inputfileGriddedData filename constant expression define a constant by name=value parameter expression define a parameter by name[=value] leastSquares expression try to minimize the expression by adjustment of the parameters removalCriteria expression points are removed if one criterion evaluates true. data0 is the first data field. longitude expression expression latitude expression expression height expression expression area expression expression: e.g. deltaL * 2.0 * sin(deltaB/2.0) * cos(latitude/rho) value expression expression to compute values (input columns are named data0, data1, ...) computeArea boolean automatically area computation of rectangular grids (overwrite area) R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates statistics sequence outputfile filename matrix with one row, columns are user defined outColumn expression expression to compute statistics columns, data* are the outputColumns', 'display_text': 'This program manipulates grid files with data in columns similar to FunctionsCalculate, see there for more details. If several inputfiles are given the data columns are copied side by side. All inputfiles must contain the same grid points. The columns are enumerated by data0, data1, .
The content of outputfileGriddedData is controlled by outColumn. The algorithm to compute the output is as follows: The expressions in outColumn are evaluated once for each grid point of the input. The variables data0, data1, are replaced by the according values from the input columns before. Additional variables are available, e.g. index, data0rms, see dataVariables.
For a simplified handling constants can be defined by name=value. It is also possible to estimate parameters in a least squares adjustment. The leastSquares serves as template for observation equations for every point. The expression leastSquares is evaluated for each grid point. The variables data0, data1, are replaced by the according values from the input columns before. In the next step the parameters are estimated in order to minimize the expressions in leastSquares in the sense of least squares.
Afterwards grid points are removed if one of the removalCriteria expressions for this grid point evaluates true (not zero).
An extra statistics:outputfile can be generated with one row of data. For the computation of the outColumn values all dataVariables are available (e.g. data3mean, data4std) inclusively the constants and estimated parameters but without the data0, data1, itself. The variables and the numbering of the columns refers to the outputfileGriddedData.
See also FunctionsCalculate, InstrumentArcCalculate, MatrixCalculate.'},
'GriddedDataConcatenate': { 'name': 'GriddedDataConcatenate', 'key': 'GriddedDataConcatenate', 'description': 'This program concatenate grid from several inputfileGriddedData and write it to a new outputfileGriddedData. Input files must have the same number of data columns. If sort is enabled, the points are sorted by latitudes starting from north/west to south east. Identical points (within a margin) can be removed with removeDuplicates.', 'config_table': 'outputfileGriddedData filename inputfileGriddedData filename border borderType sortPoints boolean sort from north/west to south east removeDuplicates choice remove duplicate points keepFirst sequence keep first point, remove all other identicals margin double margin distance for identical points [m] keepLast sequence keep last point, remove all other identicals margin double margin distance for identical points [m] R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'This program concatenate grid from several inputfileGriddedData and write it to a new outputfileGriddedData. Input files must have the same number of data columns. If sort is enabled, the points are sorted by latitudes starting from north/west to south east. Identical points (within a margin) can be removed with removeDuplicates.'},
'GriddedDataCreate': { 'name': 'GriddedDataCreate', 'key': 'GriddedDataCreate', 'description': 'This program creates a grid and writes it to outputfileGrid. The grid is expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening. Extra value columns can be appended using expressions with the common data variables for gridded data.', 'config_table': 'outputfileGrid filename grid gridType R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates value expression expression (variables as \'longitude\', \'height\', \'area\' are taken from the gridded data)', 'display_text': 'This program creates a grid and writes it to outputfileGrid. The grid is expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening. Extra value columns can be appended using expressions with the common data variables for gridded data.'},
'GriddedDataInterpolate': { 'name': 'GriddedDataInterpolate', 'key': 'GriddedDataInterpolate', 'description': 'Interpolate values of a regular rectangular inputfileGriddedData to new points given by grid and write as outputfileGriddedData. Only longitude and latitude of points are considered; the height is ignored for interpolation. (Only nearest neighbor method is implemented at the moment.)', 'config_table': 'outputfileGriddedData filename inputfileGriddedData filename must be rectangular grid gridType method choice nearestNeighbor', 'display_text': 'Interpolate values of a regular rectangular inputfileGriddedData to new points given by grid and write as outputfileGriddedData. Only longitude and latitude of points are considered; the height is ignored for interpolation.
(Only nearest neighbor method is implemented at the moment.)
'},
'GriddedDataReduceSampling': { 'name': 'GriddedDataReduceSampling', 'key': 'GriddedDataReduceSampling', 'description': 'Generate coarse grid by computing area weighted mean values. The number of points is decimated by averaging integer multiplies of grid points ( multiplierLongitude , multiplierLatitude ). if volumeConserving is set, data are interpreted as heights above ellipsoid and the tesseroid volume is conserved, where is the radius of the ellipsoid at grid center and are the grid cell boundaries. This is meaninful for Digital Elevation Models (DEM). The fine grid can be written, where the first coarse grid values (data0) are additionally appended.', 'config_table': 'outputfileCoarseGridRectangular filename coarse grid outputfileFineGridRectangular filename fine grid with additional coarse grid values inputfileFineGridRectangular filename Digital Terrain Model multiplierLongitude uint Generalizing factor multiplierLatitude uint Generalizing factor volumeConserving boolean data are interpreted as heights above ellipsoid', 'display_text': 'Generate coarse grid by computing area weighted mean values. The number of points is decimated by averaging integer multiplies of grid points (multiplierLongitude, multiplierLatitude).
if volumeConserving is set, data are interpreted as heights above ellipsoid and the tesseroid volume \\[ V=\\int_r^{r+H}\\int_{\\varphi_1}^{\\varphi_2}\\int_{\\lambda_1}^{\\lambda_2} r^2\\cos\\varphi\\,d\\varphi\\,d\\lambda\\,dr \\]is conserved, where $r$ is the radius of the ellipsoid at grid center and $(\\varphi_1-\\varphi_2)\\times(\\lambda_1-\\lambda_2)$ are the grid cell boundaries. This is meaninful for Digital Elevation Models (DEM).
The fine grid can be written, where the first coarse grid values (data0) are additionally appended.'},
'GriddedDataTimeSeries2GriddedData': { 'name': 'GriddedDataTimeSeries2GriddedData', 'key': 'GriddedDataTimeSeries2GriddedData', 'description': 'Read a inputfileGriddedDataTimeSeries and write for each epoch a gridded data file where the variableLoopTime and variableLoopIndex are expanded for each point of the given timeSeries to create the file name for this epoch (see text parser). If timeSeries is not set the temporal nodal points from the inputfile are used. See also GriddedData2GriddedDataTimeSeries.', 'config_table': 'outputfilesGriddedData filename for each epoch variableLoopTime string variable with time of each epoch variableLoopIndex string variable with index of current epoch (starts with zero) variableLoopCount string variable with total number of epochs inputfileGriddedDataTimeSeries filename timeSeries timeSeriesType otherwise times from inputfile are used', 'display_text': 'Read a inputfileGriddedDataTimeSeries and write for each epoch a gridded data file where the variableLoopTime and variableLoopIndex are expanded for each point of the given timeSeries to create the file name for this epoch (see text parser).
If timeSeries is not set the temporal nodal points from the inputfile are used.
See also GriddedData2GriddedDataTimeSeries.'},
'GriddedDataTimeSeries2PotentialCoefficients': { 'name': 'GriddedDataTimeSeries2PotentialCoefficients', 'key': 'GriddedDataTimeSeries2PotentialCoefficients', 'description': 'This program estimate potential coefficients from inputfileGriddedDataTimeSeries in the same way as GriddedData2PotentialCoefficients but not only for one grid but for each epoch of timeSeries or if not set for the temporal nodal points from the inputfile. The outputfilePotentialCoefficients (one for each value) are written for each epoch with the expansion of variableLoopTime and variableLoopIndex (see text parser). See also GriddedData2PotentialCoefficients.', 'config_table': 'outputfilesPotentialCoefficients filename for each epoch variableLoopTime string variable with time of each epoch variableLoopIndex string variable with index of current epoch (starts with zero) variableLoopCount string variable with total number of epochs inputfileGriddedDataTimeSeries filename timeSeries timeSeriesType otherwise times from inputfile are used value expression expression (variables: longitude, latitude, height, area, data0, data1, ...) weight expression expression to compute values (input columns are named data0, data1, ...) kernel kernelType kernel in which the grid values are given minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius for potential coefficients leastSquares boolean false: quadrature formular, true: least squares adjustment order by order', 'display_text': 'This program estimate potential coefficients from inputfileGriddedDataTimeSeries in the same way as GriddedData2PotentialCoefficients but not only for one grid but for each epoch of timeSeries or if not set for the temporal nodal points from the inputfile. 
The outputfilePotentialCoefficients (one for each value) are written for each epoch with the expansion of variableLoopTime and variableLoopIndex (see text parser).
See also GriddedData2PotentialCoefficients.'},
'GriddedTopography2AtmospherePotentialCoefficients': { 'name': 'GriddedTopography2AtmospherePotentialCoefficients', 'key': 'GriddedTopography2AtmospherePotentialCoefficients', 'description': 'Estimate interior and exterior potential coefficients for atmosphere above digital terrain models. Coefficients for interior and exterior are computed. The density of the atmosphere is assumed to be (Sjöberg, 1998) where is the radial distance of the ellipsoid at each point, the radial height above the ellipsoid, is densitySeaLevel and nu is a constant factor. The density is integrated from radialLowerBound and upperAtmosphericBoundary above the ellipsoid. The radialLowerBound is typically the topography and can be computed as expression at every point from inputfileGriddedData.', 'config_table': 'outputfilePotentialCoefficientsExterior filename outputfilePotentialCoefficientsInterior filename inputfileGriddedData filename Digital Terrain Model densitySeaLevel double [kg/m**3] ny double Constant for Atmosphere radialLowerBound expression expression (variables \'L\', \'B\', \'height\', \'data\', and \'area\' are taken from the gridded data upperAtmosphericBoundary double constant upper bound [m] factor double the result is multiplied by this factor, set -1 to subtract the field minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'Estimate interior and exterior potential coefficients for atmosphere above digital terrain models. Coefficients for interior $(1/r)^{n+1}$ and exterior ($r^n$) are computed. The density of the atmosphere is assumed to be (Sjöberg, 1998) \\[ \\rho_0\\left(\\frac{R}{R+h}\\right)^\\nu, \\]where $R$ is the radial distance of the ellipsoid at each point, $h$ the radial height above the ellipsoid, $\\rho_0$ is densitySeaLevel and nu $\\nu$ is a constant factor. 
The density is integrated from radialLowerBound and upperAtmosphericBoundary above the ellipsoid. The radialLowerBound is typically the topography and can be computed as expression at every point from inputfileGriddedData.'},
'GriddedTopography2PotentialCoefficients': { 'name': 'GriddedTopography2PotentialCoefficients', 'key': 'GriddedTopography2PotentialCoefficients', 'description': 'Estimate potential coefficients from digital terrain models. Coefficients for interior and exterior ( ) are computed.', 'config_table': 'outputfilePotentialCoefficients filename outputfilePotentialCoefficientsInterior filename inputfileGriddedData filename Digital Terrain Model density expression expression [kg/m^3] radialUpperBound expression expression (variables \'L\', \'B\', \'height\', \'data\', and \'area\' are taken from the gridded data radialLowerBound expression expression (variables \'L\', \'B\', \'height\', \'data\', and \'area\' are taken from the gridded data factor double the result is multiplied by this factor minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'Estimate potential coefficients from digital terrain models. Coefficients for interior $(1/r)^{n+1}$ and exterior ($r^n$) are computed.'},
'GriddedTopographyEllipsoidal2Radial': { 'name': 'GriddedTopographyEllipsoidal2Radial', 'key': 'GriddedTopographyEllipsoidal2Radial', 'description': 'Interpolate digital terrain models from ellipoidal heights to radial heights.', 'config_table': 'outputfileGriddedData filename inputfileGriddedData filename Digital Terrain Model', 'display_text': 'Interpolate digital terrain models from ellipoidal heights to radial heights.'},
'Matrix2GriddedData': { 'name': 'Matrix2GriddedData', 'key': 'Matrix2GriddedData', 'description': 'This program reads a matrix file with data in columns and convert into gridded data . The input columns are enumerated by data0 , data1 , , see dataVariables .', 'config_table': 'outputfileGriddedData filename inputfileMatrix filename points choice ellipsoidal sequence longitude expression expression latitude expression expression height expression expression cartesian sequence x expression expression y expression expression z expression expression area expression expression (e.g. deltaL*2*sin(deltaB/2)*cos(data1/RHO)) value expression expression sortPoints boolean sort from north/west to south east computeArea boolean the area can be computed automatically for rectangular grids R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'This program reads a matrix file with data in columns and convert into gridded data. The input columns are enumerated by data0, data1, , see dataVariables.'},
'MatrixRectangular2GriddedData': { 'name': 'MatrixRectangular2GriddedData', 'key': 'MatrixRectangular2GriddedData', 'description': 'Read gridded data (matrix).', 'config_table': 'outputfileGriddedData filename inputfileMatrix filename rowMajor boolean true: data is ordered row by row, false: columnwise startLongitude angle longitude of upper left corner of the grid startLatitude angle latitude of upper left corner of the grid deltaLongitude angle sampling, negative for east to west data deltaLatitude angle sampling, negative for south to north data R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'Read gridded data (matrix).'},
'TimeSeries2GriddedData': { 'name': 'TimeSeries2GriddedData', 'key': 'TimeSeries2GriddedData', 'description': 'Interpret the data columns of inputfileTimeSeries as data points of a corresponding grid. For each epoch a gridded data file is written where the variableLoopTime and variableLoopIndex are expanded for each point of the given time series to create the file name for this epoch (see text parser). The number of input data columns must be a multiple of the number of grid points. If isGroupedDataByPoint is true the inputfileTimeSeries starts with all data (data0, data1) for the first point, followed by all data of the second point and so on. If isGroupedDataByPoint is false, the file starts with data0 for all points, followed by all data1 and so on. See also GriddedData2TimeSeries.', 'config_table': 'outputfilesGriddedData filename for each epoch variableLoopTime string variable with time of each epoch variableLoopIndex string variable with index of current epoch (starts with zero) variableLoopCount string variable with total number of epochs inputfileTimeSeries filename each epoch: multiple data for points (MISCVALUES) grid gridType corresponding grid points isDataGroupedByPoint boolean multiple data are given point by point, otherwise: first data0 for all points, followed by all data1 R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'Interpret the data columns of inputfileTimeSeries as data points of a corresponding grid.
For each epoch a gridded data file is written where the variableLoopTime and variableLoopIndex are expanded for each point of the given time series to create the file name for this epoch (see text parser).
The number of input data columns must be a multiple of the number $n$ of grid points. If isGroupedDataByPoint is true the inputfileTimeSeries starts with all data (data0, data1) for the first point, followed by all data of the second point and so on. If isGroupedDataByPoint is false, the file starts with data0 for all points, followed by all data1 and so on.
See also GriddedData2TimeSeries.'},
'Instrument2AllanVariance': { 'name': 'Instrument2AllanVariance', 'key': 'Instrument2AllanVariance', 'description': 'This program computes the overlapping Allan variance from an inputfileInstrument. The estimate is averaged over all arcs (arcs are assumed to contain no data gaps). The overlapping Allan variance is defined as where is the averaging interval defined by the median sampling.', 'config_table': 'outputfileAllanVariance filename column 0: averaging interval [seconds], column 1-(n-1): Allan variance for each data column inputfileInstrument filename', 'display_text': 'This program computes the overlapping Allan variance from an inputfileInstrument. The estimate is averaged over all arcs (arcs are assumed to contain no data gaps).
The overlapping Allan variance is defined as \\[ \\sigma^2(m\\tau_0) = \\frac{1}{2(m\\tau_0)^2(N-2m)} \\sum_{n=1}^{N-2m}(x_{n+2m}-2x_{n+m}+x_n)^2, \\]where $m\\tau_0$ is the averaging interval defined by the median sampling $\\tau_0$.'},
'Instrument2CovarianceFunctionVCE': { 'name': 'Instrument2CovarianceFunctionVCE', 'key': 'Instrument2CovarianceFunctionVCE', 'description': 'This estimates a covariance function of inputfileInstrument for all selected columns with startDataFields and countDataFields. The estimation is performed robustly via variance component estimation. Bad arcs are downweigthed and the accuracies can be written with outputfileSigmasPerArc. The length of the covariance functions are determined by the longest arc. Additionaly the data can be detrended with parameter and parameterPerArc.', 'config_table': 'outputfileCovarianceFunction filename covariance functions outputfileSigmasPerArc filename accuracies of each arc outputfileResiduals filename outputfileSolution filename estimated parameter vector (global part only) inputfileInstrument filename startDataFields uint start countDataFields uint number of data fields (default: all after start) parameter parametrizationTemporalType data is reduced by temporal representation parameterPerArc parametrizationTemporalType data is reduced by temporal representation iterationCount uint number of iterations for the estimation', 'display_text': 'This estimates a covariance function of inputfileInstrument for all selected columns with startDataFields and countDataFields. The estimation is performed robustly via variance component estimation. Bad arcs are downweigthed and the accuracies can be written with outputfileSigmasPerArc. The length of the covariance functions are determined by the longest arc. Additionaly the data can be detrended with parameter and parameterPerArc.'},
'Instrument2CrossCorrelationFunction': { 'name': 'Instrument2CrossCorrelationFunction', 'key': 'Instrument2CrossCorrelationFunction', 'description': 'This program computes the cross correlation between all corresponding data columns in two instrument files. The instrument files must be synchronized (InstrumentSynchronize). The outputfileCorrelation is a matrix with the first column containing the time lag followed by cross-correlation function for each data column. The maximum lag is defined by the maximum arc length. The correlation is based on the unbiased estimate of the cross-covariance between data columns and , which is averaged over all arcs. From this estimate, the correlation for each lag is then computed via which is the ratio between the biased estimates of the cross-covariance at lag and the auto-covariance of the individual data columns. For instrument with data gaps, lag bins without any data are set to NAN.', 'config_table': 'outputfileCorrelation filename column 1: time lag, column 2..n cross-correlation inputfileInstrument filename inputfileInstrumentReference filename', 'display_text': 'This program computes the cross correlation between all corresponding data columns in two instrument files. The instrument files must be synchronized (InstrumentSynchronize). The outputfileCorrelation is a matrix with the first column containing the time lag followed by cross-correlation function for each data column. The maximum lag is defined by the maximum arc length.
The correlation is based on the unbiased estimate of the cross-covariance between data columns $x$ and $y$, \\[ \\sigma_{xy}(h) = \\frac{1}{N}\\sum_{k=1} x_{k+h} y_k, \\]which is averaged over all arcs. From this estimate, the correlation for each lag is then computed via \\[ r_{xy}(h) = \\frac{\\sigma_{xy}(h)}{\\sigma_x(0)\\sigma_y(0)}, \\]which is the ratio between the biased estimates of the cross-covariance at lag $h$ and the auto-covariance of the individual data columns.
For instrument with data gaps, lag bins without any data are set to NAN.'},
'Instrument2Histogram': { 'name': 'Instrument2Histogram', 'key': 'Instrument2Histogram', 'description': 'This program computes the arc-wise histogram from an instrument file . The output is a matrix with the first column containing the lower bound of each bin. The other columns contain the histograms for each arc.', 'config_table': 'outputfileMatrix filename column 1: lower bin bound; columns 2 to N: histogram of each arc inputfileInstrument filename selectDataField uint select channel for histogram computation binCount uint (default: Freedman-Diaconis\' choice, maximum of all channels) lowerBound expression lower bound for bins (default: global minimum, data values outside are ignored) upperBound expression upper bound for bins (default: global maximum, data values outside are ignored) relative boolean output relative frequencies cumulative boolean accumulate frequencies', 'display_text': 'This program computes the arc-wise histogram from an instrument file. The output is a matrix with the first column containing the lower bound of each bin. The other columns contain the histograms for each arc.
'},
'Instrument2PowerSpectralDensity': { 'name': 'Instrument2PowerSpectralDensity', 'key': 'Instrument2PowerSpectralDensity', 'description': 'This program computes the power spectral density (PSD) for all data fields in an instrument file . The PSD is computed using Lomb\'s method. For each arc and each frequency , a sinusoid is fit to the data The PSD for this frequency is then computed by forming the square sum of adjusted observations: The resulting PSD is the average over all arcs. For regularly sampled time series, this method yields the same results as FFT based PSD estimates. A regular frequency grid based on the longest arc and the median sampling is computed. The maximum number of epochs per arc is determined by the Nyquist frequency is given by If it is suspected that This program computes the power spectral density (PSD) for all data fields in an contains secular variations, the input should be detrended using InstrumentDetrend . See also Instrument2CovarianceFunctionVCE , CovarianceFunction2PowerSpectralDensity , PowerSpectralDensity2CovarianceFunction .', 'config_table': 'outputfilePSD filename estimated PSD: column 0: frequency vector, column 1-(n-1): PSD estimate for each channel inputfileInstrument filename', 'display_text': 'This program computes the power spectral density (PSD) for all data fields in an instrument file. The PSD is computed using Lomb\'s method. For each arc and each frequency $f$, a sinusoid is fit to the data \\[ l_i = a \\cos(2\\pi f t_i) + b \\sin(2\\pi f t_i) + e_i \\] The PSD for this frequency is then computed by forming the square sum of adjusted observations: \\[ P(f) = \\sum_i \\hat{l}^2_i. \\] The resulting PSD is the average over all arcs. For regularly sampled time series, this method yields the same results as FFT based PSD estimates.
A regular frequency grid based on the longest arc and the median sampling is computed. The maximum number of epochs per arc is determined by \\[ N = \\frac{t_{\\text{end}} - t_{\\text{start}}}{\\Delta t_{\\text{median}} } + 1, \\]the Nyquist frequency is given by \\[ f_{\\text{nyq}} = \\frac{1}{2\\Delta t_{\\text{median}}}. \\] If it is suspected that inputfileInstrument contains secular variations, the input should be detrended using InstrumentDetrend.
See also Instrument2CovarianceFunctionVCE, CovarianceFunction2PowerSpectralDensity, PowerSpectralDensity2CovarianceFunction.'},
'Instrument2RmsPlotGrid': { 'name': 'Instrument2RmsPlotGrid', 'key': 'Instrument2RmsPlotGrid', 'description': 'This program computes an RMS plot grid from one or more This program computes an RMS plot grid from one or more containing 3D data (e.g. orbits or station positions), which can then be plotted as gridded data in PlotGraph . The RMS is computed from the difference between This program computes an RMS plot grid from one or more and This program computes an RMS plot grid from one or more . All instrument files must be synchronized (see InstrumentSynchronize ). Each separate This program computes an RMS plot grid from one or more represents an entry (e.g. a satellite or station) in the resulting grid. Therefore, providing, for example, 32 orbit files of GPS satellites results in a grid with columns: mjd, id (0-31), rms. The first three data columns of the instrument data are considered for computation of the RMS values. The factor can be set to, for example, sqrt(3) to get 3D instead of 1D RMS values. If This program computes an RMS plot grid from one or more are provided, each This program computes an RMS plot grid from one or more and This program computes an RMS plot grid from one or more serves as a template with variable loopTime . This allows concatenation of instrument files, for example to create a month-long RMS plot grid from daily GPS orbit files (see below). Helmert parameters between the two frames can be estimated each epoch optionally if estimateShift , estimateScale , or estimateRotation are set. It uses a robust least squares adjustment .', 'config_table': 'outputfileRmsPlotGrid filename columns: mjd, id, rms outputfileHelmertTimeSeries filename columns: mjd, tx, ty, tz, scale, rx, ry, rz inputfileInstrument filename one file per satellite/station inputfileInstrumentReference filename one file per satellite/station, same order as above timeIntervals timeSeriesType for {loopTime} variable in inputfile factor double e.g. 
sqrt(3) for 3D RMS estimateShift boolean coordinate center every epoch estimateScale boolean scale factor of position every epoch estimateRotation boolean rotation every epoch huber double for robust least squares huberPower double for robust least squares huberMaxIteration uint (maximum) number of iterations for robust estimation', 'display_text': 'This program computes an RMS plot grid from one or more inputfileInstrument containing 3D data (e.g. orbits or station positions), which can then be plotted as gridded data in PlotGraph. The RMS is computed from the difference between inputfileInstrument and inputfileInstrumentReference. All instrument files must be synchronized (see InstrumentSynchronize).
Each separate inputfileInstrument represents an entry (e.g. a satellite or station) in the resulting grid. Therefore, providing, for example, 32 orbit files of GPS satellites results in a grid with columns: mjd, id (0-31), rms.
The first three data columns of the instrument data are considered for computation of the RMS values. The factor can be set to, for example, sqrt(3) to get 3D instead of 1D RMS values.
If timeIntervals are provided, each inputfileInstrument and inputfileInstrumentReference serves as a template with variable loopTime. This allows concatenation of instrument files, for example to create a month-long RMS plot grid from daily GPS orbit files (see below).
Helmert parameters between the two frames can optionally be estimated for each epoch if estimateShift, estimateScale, or estimateRotation are set. It uses a robust least squares adjustment.
'},
'Instrument2Scaleogram': { 'name': 'Instrument2Scaleogram', 'key': 'Instrument2Scaleogram', 'description': 'This program computes the wavelet transform of a time series up to a maxLevel . The scalogram is written to a matrix which can be plotted by using a gridded layer in PlotGraph . Individual detail levels can be written to matrix files by setting This program computes the wavelet transform of a time series up to a . The data column to be decomposed must be set by selectDataField . The wavelet transform is implemented as a filter bank, so care should be taken when the input contains data gaps. Low/highpass wavelet filters are applied in forward and backward direction, input is padded symmetric. See This program computes the wavelet transform of a time series up to a for details.', 'config_table': 'outputfileScaleogram filename matrix columns: mjd, level, value outputfileLevels filename use loopLevel as variable inputfileInstrument filename inputfileWavelet filename wavelet coefficients selectDataField uint data column to transform maxLevel uint maximum level of decomposition (default: full)', 'display_text': 'This program computes the wavelet transform of a time series up to a maxLevel. The scalogram is written to a matrix which can be plotted by using a gridded layer in PlotGraph. Individual detail levels can be written to matrix files by setting outputfileLevels. The data column to be decomposed must be set by selectDataField.
The wavelet transform is implemented as a filter bank, so care should be taken when the input contains data gaps. Low/highpass wavelet filters are applied in forward and backward direction, and the input is padded symmetrically. See digitalFilter for details.
'},
'Instrument2SpectralCoherence': { 'name': 'Instrument2SpectralCoherence', 'key': 'Instrument2SpectralCoherence', 'description': 'This program computes the spectral coherence between two instrument files . The (magnitude-squared) coherence is defined as and is a measure in the range [0, 1] for the similarity of the signals and in frequency domain. is the cross-spectral density between and and , are auto-spectral densities. Auto- and cross-spectral densities are computed using Lomb\'s method (see Instrument2PowerSpectralDensity for details). The resulting PSD is the average over all arcs. For regularly sampled time series, this method yields the same results as FFT based PSD estimates. A regular frequency grid based on the longest arc and the median sampling is computed. The maximum number of epochs per arc is determined by the Nyquist frequency is given by If it is suspected that This program computes the spectral coherence between two contains secular variations, the input should be detrended using InstrumentDetrend . The This program computes the spectral coherence between two contains a matrix with the frequency vector as first column, the coherence for each instrument channel is saved in the following columns.', 'config_table': 'outputfileCoherence filename column 1: frequency, column 2-n coherence inputfileInstrument filename inputfileInstrumentReference filename', 'display_text': 'This program computes the spectral coherence between two instrument files.
The (magnitude-squared) coherence is defined as \\[ C_{xy}(f) = \\frac{|P_{xy}(f)|^2}{P_{xx}(f)P_{yy}(f)} \\]and is a measure in the range [0, 1] for the similarity of the signals $x$ and $y$ in frequency domain. $P_{xy}$ is the cross-spectral density between $x$ and $y$ and $P_{xx}$, $P_{yy}$ are auto-spectral densities. Auto- and cross-spectral densities are computed using Lomb\'s method (see Instrument2PowerSpectralDensity for details).
The resulting PSD is the average over all arcs. For regularly sampled time series, this method yields the same results as FFT based PSD estimates.
A regular frequency grid based on the longest arc and the median sampling is computed. The maximum number of epochs per arc is determined by \\[ N = \\frac{t_{\\text{end}} - t_{\\text{start}}}{\\Delta t_{\\text{median}} } + 1, \\]the Nyquist frequency is given by \\[ f_{\\text{nyq}} = \\frac{1}{2\\Delta t_{\\text{median}}}. \\] If it is suspected that inputfileInstrument contains secular variations, the input should be detrended using InstrumentDetrend.
The outputfileCoherence contains a matrix with the frequency vector as first column, the coherence for each instrument channel is saved in the following columns.'},
'Instrument2Spectrogram': { 'name': 'Instrument2Spectrogram', 'key': 'Instrument2Spectrogram', 'description': 'This program applies the Short Time Fourier Transform (STFT) to selected data columns of This program applies the Short Time Fourier Transform (STFT) to selected data columns of and computes the spectrogram. The STFT is computed at centered This program applies the Short Time Fourier Transform (STFT) to selected data columns of with an (possible overlapping) rectangular window with windowLength seconds. Data gaps are zero padded within the window. The This program applies the Short Time Fourier Transform (STFT) to selected data columns of is a matrix with each row the time (MJD), the frequency , and the amplitudes for the selected data columns. It can be plotted with PlotGraph .', 'config_table': 'outputfileSpectrogram filename mjd, freq, ampl0, ampl1, ... inputfileInstrument filename timeSeries timeSeriesType center of SFFT window windowLength double [seconds] startDataFields uint start countDataFields uint number of data fields (default: all)', 'display_text': 'This program applies the Short Time Fourier Transform (STFT) to selected data columns of inputfileInstrument and computes the spectrogram. The STFT is computed at centered timeSeries with an (possible overlapping) rectangular window with windowLength seconds. Data gaps are zero padded within the window.
The outputfileSpectrogram is a matrix with each row the time (MJD), the frequency $[Hz]$, and the amplitudes $[unit/\\sqrt{Hz}]$ for the selected data columns. It can be plotted with PlotGraph.
'},
'InstrumentAccelerometer2ThermosphericDensity': { 'name': 'InstrumentAccelerometer2ThermosphericDensity', 'key': 'InstrumentAccelerometer2ThermosphericDensity', 'description': 'This program estimates neutral mass densities along the satellite trajectory based on accelerometer data . In order to determine the neutral mass density the accelerometer input should only reflect the accelerations due to drag (e.g. This program estimates neutral mass densities along the satellite trajectory based on ). Thus, influences from solar and Earth radiation pressure must be reduced beforehand.', 'config_table': 'outputfileDensity filename MISCVALUE (kg/m^3) satelliteModel filename satellite macro model inputfileOrbit filename inputfileStarCamera filename inputfileAccelerometer filename add non-gravitational forces in satellite reference frame thermosphere thermosphereType used to compute temperature and wind considerTemperature boolean compute drag and lift, otherwise simple drag coefficient is used considerWind boolean earthRotation earthRotationType ephemerides ephemeridesType', 'display_text': 'This program estimates neutral mass densities along the satellite trajectory based on accelerometer data. In order to determine the neutral mass density the accelerometer input should only reflect the accelerations due to drag (e.g. miscAccelerations:atmosphericDrag). Thus, influences from solar and Earth radiation pressure must be reduced beforehand.'},
'InstrumentAccelerometerApplyEstimatedParameters': { 'name': 'InstrumentAccelerometerApplyEstimatedParameters', 'key': 'InstrumentAccelerometerApplyEstimatedParameters', 'description': 'This program evaluates estimated satellite parameters and writes the result to an accelerometer file.', 'config_table': 'outputfileAccelerometer filename inputfileSatelliteModel filename satellite macro model inputfileOrbit filename inputfileStarCamera filename inputfileAccelerometer filename add non-gravitational forces in satellite reference frame earthRotation earthRotationType ephemerides ephemeridesType may be needed by parametrizationAcceleration parametrizationAcceleration parametrizationAccelerationType orbit force parameters inputfileParameter filename estimated orbit force parameters indexStart int position in the solution vector rightSide int if solution contains several right hand sides, select one factor double the result is multiplied by this factor', 'display_text': 'This program evaluates estimated satellite parameters and writes the result to an accelerometer file.'},
'InstrumentAccelerometerEstimateBiasScale': { 'name': 'InstrumentAccelerometerEstimateBiasScale', 'key': 'InstrumentAccelerometerEstimateBiasScale', 'description': 'This program calibrates This program calibrates with respect to simulated accelerometer data, see SimulateAccelerometer . The parameters This program calibrates of This program calibrates are estimated and the effect is reduced to calibrate the accelerometer data . If This program calibrates is given, the corresponding epochs (within marginThruster ) are not used for the parameter estimation, but the accelerometer epochs are still calibrated afterwards. An arbitrary instrument file is allowed here. The This program calibrates , This program calibrates , This program calibrates , This program calibrates , and This program calibrates are only needed for some special parametrizations.', 'config_table': 'outputfileAccelerometer filename outputfileSolution filename inputfileAccelerometer filename inputfileAccelerometerSim filename inputfileThruster filename remove thruster events marginThruster double margin size (on both sides) [seconds] inputfileOrbit filename inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType may be needed by parametrizationAcceleration inputfileSatelliteModel filename satellite macro model parametrizationAcceleration parametrizationAccelerationType', 'display_text': 'This program calibrates inputfileAccelerometer with respect to simulated accelerometer data, see SimulateAccelerometer. The parameters outputfileSolution of parametrizationAcceleration are estimated and the effect is reduced to calibrate the accelerometer data.
If inputfileThruster is given, the corresponding epochs (within marginThruster) are not used for the parameter estimation, but the accelerometer epochs are still calibrated afterwards. An arbitrary instrument file is allowed here.
The inputfileOrbit, inputfileStarCamera, earthRotation, ephemerides, and satelliteModel are only needed for some special parametrizations.'},
'InstrumentAccelerometerEstimateParameters': { 'name': 'InstrumentAccelerometerEstimateParameters', 'key': 'InstrumentAccelerometerEstimateParameters', 'description': 'This program estimates calibration parameters for acceleration data given given an optional reference acceleration. Specifically, the program solves the equation for the unknown parameters , where is given in This program estimates calibration parameters for acceleration data given given an optional reference acceleration. Specifically, the program solves the equation and is given in This program estimates calibration parameters for acceleration data given given an optional reference acceleration. Specifically, the program solves the equation . The parametrization of can be set via This program estimates calibration parameters for acceleration data given given an optional reference acceleration. Specifically, the program solves the equation . Optionally, the empirical covariance functions for the accelerations can be estimated by enabling estimateCovarianceFunctions . The estimated parameters are written to the file This program estimates calibration parameters for acceleration data given given an optional reference acceleration. 
Specifically, the program solves the equation and can be used by InstrumentAccelerometerApplyEstimatedParameters to calibrate accelerometer measurements.', 'config_table': 'outputfileSolution filename values for estimated parameters outputfileParameterNames filename names of the estimated parameters estimateArcSigmas sequence outputfileArcSigmas filename accuracies of each arc estimateEpochSigmas sequence outputfileEpochSigmas filename estimated epoch-wise sigmas estimateCovarianceFunctions sequence outputfileCovarianceFunction filename covariance functions for x, y, z direction inputfileAccelerometer filename inputfileAccelerometerReference filename if not given, reference acceleration is assumed zero inputfileOrbit filename may be needed by parametrizationAcceleration inputfileStarCamera filename may be needed by parametrizationAcceleration inputfileSatelliteModel filename satellite macro model (may be needed by parametrizationAcceleration) earthRotation earthRotationType may be needed by parametrizationAcceleration ephemerides ephemeridesType may be needed by parametrizationAcceleration parametrizationAcceleration parametrizationAccelerationType sigmaX double apriori accuracy in x-axis sigmaY double apriori accuracy in y-axis sigmaZ double apriori accuracy in z-axis iterationCount uint iteration count for determining the covariance function', 'display_text': 'This program estimates calibration parameters for acceleration data given given an optional reference acceleration. Specifically, the program solves the equation \\[ \\mathbf{a} - \\mathbf{a}_\\text{ref} = \\mathbf{f}(\\mathbf{x}) + \\mathbf{e} \\]for the unknown parameters $\\mathbf{x}$, where $\\mathbf{a}$ is given in inputfileAccelerometer and $\\mathbf{a}_\\text{ref}$ is given in inputfileAccelerometerReference. The parametrization of $\\mathbf{x}$ can be set via parametrizationAcceleration. 
Optionally, the empirical covariance functions for the accelerations $\\mathbf{a}$ can be estimated by enabling estimateCovarianceFunctions.
The estimated parameters are written to the file outputfileSolution and can be used by InstrumentAccelerometerApplyEstimatedParameters to calibrate accelerometer measurements.'},
'InstrumentApplyTimeOffset': { 'name': 'InstrumentApplyTimeOffset', 'key': 'InstrumentApplyTimeOffset', 'description': 'This program applies a inputfileTimeOffset (MISCVALUE) to an inputfileInstrument. The time offsets in seconds are multiplied with a factor. The instrument files must be synchronized (see InstrumentSynchronize).', 'config_table': 'outputfileInstrument filename inputfileInstrument filename inputfileTimeOffset filename MISCVALUE with time offset in seconds factor double applied to time offset', 'display_text': 'This program applies a inputfileTimeOffset (MISCVALUE) to an inputfileInstrument. The time offsets in seconds are multiplied with a factor. The instrument files must be synchronized (see InstrumentSynchronize).'},
'InstrumentArcCalculate': { 'name': 'InstrumentArcCalculate', 'key': 'InstrumentArcCalculate', 'description': 'This program manipulates the data columns every arc of an instrument file similar to FunctionsCalculate , see there for more details. If several This program manipulates the data columns every arc of an s are given the data columns are copied side by side. For this the instrument files must be synchronized (see InstrumentSynchronize ). For the data columns the standard data variables are available, see dataVariables . For the time column (MJD) a variable epoch (together with epochmean , epochmin , ) is defined additionally. The content of This program manipulates the data columns every arc of an is controlled by outColumn . The number of outColumn must agree with the selected This program manipulates the data columns every arc of an . The algorithm to compute the output is as follows: The expressions in outColumn are evaluated once for each epoch of the input. The variables data0 , data1 , are replaced by the according values from the input columns before. If no outColumn are specified all input columns are used instead directly. The This program manipulates the data columns every arc of an can be specified with outType and must be agree with the number of columns. An extra statistics file can be generated with one mid epoch per arc. For the computation of the outColumn values all dataVariables are available (e.g. epochmin , data0mean , data1std , ) inclusively the constant s and estimated parameter s but without the data0 , data1 , itself. The variables and the numbering of the columns refers to the This program manipulates the data columns every arc of an . 
See also FunctionsCalculate , MatrixCalculate .', 'config_table': 'outputfileInstrument filename inputfileInstrument filename data columns are appended to the right constant expression define a constant by name=value parameter expression define a parameter by name[=value] leastSquares expression try to minimize the expression by adjustment of the parameters removalCriteria expression row is removed if one criterion evaluates true. outType instrumentTypeType outColumn expression expression of output columns, extra \'epoch\' variable statistics sequence outputfileInstrument filename instrument file with mid epoch per arc, data columns are user defined outColumn expression expression to compute statistics columns, data* are from outColumn', 'display_text': 'This program manipulates the data columns every arc of an instrument file similar to FunctionsCalculate, see there for more details. If several inputfileInstruments are given the data columns are copied side by side. For this the instrument files must be synchronized (see InstrumentSynchronize). For the data columns the standard data variables are available, see dataVariables. For the time column (MJD) a variable epoch (together with epochmean, epochmin, ) is defined additionally.
The content of outputfileInstrument is controlled by outColumn. The number of outColumn must agree with the selected outType. The algorithm to compute the output is as follows: The expressions in outColumn are evaluated once for each epoch of the input. The variables data0, data1, are replaced by the corresponding values from the input columns before. If no outColumn are specified all input columns are used directly instead. The instrument type can be specified with outType and must agree with the number of columns.
An extra statistics file can be generated with one mid epoch per arc. For the computation of the outColumn values all dataVariables are available (e.g. epochmin, data0mean, data1std, ) inclusively the constants and estimated parameters but without the data0, data1, itself. The variables and the numbering of the columns refers to the outputfileInstrument.
See also FunctionsCalculate, MatrixCalculate.'},
'InstrumentArcCrossStatistics': { 'name': 'InstrumentArcCrossStatistics', 'key': 'InstrumentArcCrossStatistics', 'description': 'Computes statistics of selected data columns between two instrument files arc wise. The Computes statistics of selected data columns between two contains for every arc one (mid) epoch with statistics column(s). Possible statistics are Correlation Error RMS Nash-Sutcliffe coefficient (NSC) With removeArcMean the mean of each data column of each arc is reduced before. With perColumn separate statistics for each selected data column are computed, otherwise an overall value is computed. See also InstrumentArcStatistics , InstrumentStatisticsTimeSeries .', 'config_table': 'outputfileStatisticsTimeSeries filename statistics column(s) per arc, MISCVALUES inputfileInstrument filename inputfileInstrumentReference filename statistics choice correlation errorRMS rms of differences nashSutcliffe with respect to reference field removeArcMean boolean startDataFields uint start countDataFields uint number of data fields (default: all) perColumn boolean compute statistic per column', 'display_text': 'Computes statistics of selected data columns between two instrument files arc wise. The outputfileStatisticsTimeSeries contains for every arc one (mid) epoch with statistics column(s). Possible statistics are
With removeArcMean the mean of each data column of each arc is reduced before.
With perColumn separate statistics for each selected data column are computed, otherwise an overall value is computed.
See also InstrumentArcStatistics, InstrumentStatisticsTimeSeries.'},
'InstrumentArcStatistics': { 'name': 'InstrumentArcStatistics', 'key': 'InstrumentArcStatistics', 'description': 'Computes statistics of selected data columns of Computes statistics of selected data columns of arc wise. The Computes statistics of selected data columns of contains for every arc one (mid) epoch with statistics column(s). Possible statistics are root mean square, standard deviation, mean, median, min, and max. With perColumn separate statistics for each selected data column are computed, otherwise an overall value is computed. See also InstrumentArcCrossStatistics , InstrumentStatisticsTimeSeries .', 'config_table': 'outputfileStatisticsTimeSeries filename columns: mjd, statistics column(s) per instrument file inputfileInstrument filename statistics choice rootMeanSquare standardDeviation mean median min max epochCount startDataFields uint start countDataFields uint number of data fields (default: all) perColumn boolean compute statistic per column ignoreNan boolean ignore NaN values in input', 'display_text': 'Computes statistics of selected data columns of inputfileInstrument arc wise. The outputfileStatisticsTimeSeries contains for every arc one (mid) epoch with statistics column(s). Possible statistics are root mean square, standard deviation, mean, median, min, and max.
With perColumn separate statistics for each selected data column are computed, otherwise an overall value is computed.
See also InstrumentArcCrossStatistics, InstrumentStatisticsTimeSeries.'},
'InstrumentConcatenate': { 'name': 'InstrumentConcatenate', 'key': 'InstrumentConcatenate', 'description': 'This program concatenates the arcs from several instrument files and writes them to a new file . Input files must be of the same type. The arcs are merged to one arc even though there is a gap in between. To split the data into arcs use InstrumentSynchronize . Three options are available: sort , removeDuplicates and checkForNaNs . If sort is enabled, the program reads all files, no matter if they are sorted correctly in time, and then sorts the epochs. If removeDuplicates is enabled, the program checks the whole data set for epochs that are contained twice. And if checkForNaNs is enabled the data set is checked for invalid epochs containing NaNs.', 'config_table': 'outputfile filename inputfile filename sort boolean sort epochs with increasing time removeDuplicates choice remove duplicate epochs keepFirst sequence keep first epoch with the same time stamp, remove all others margin double margin for identical times [seconds] keepLast sequence keep last epoch with the same time stamp, remove all others margin double margin for identical times [seconds] checkForNaNs boolean remove epochs with NaN values in one of the data fields', 'display_text': 'This program concatenates the arcs from several instrument files and writes them to a new file. Input files must be of the same type. The arcs are merged to one arc even though there is a gap in between. To split the data into arcs use InstrumentSynchronize. Three options are available: sort, removeDuplicates and checkForNaNs. If sort is enabled, the program reads all files, no matter if they are sorted correctly in time, and then sorts the epochs. If removeDuplicates is enabled, the program checks the whole data set for epochs that are contained twice. And if checkForNaNs is enabled the data set is checked for invalid epochs containing NaNs.'},
'InstrumentCovarianceCheck': { 'name': 'InstrumentCovarianceCheck', 'key': 'InstrumentCovarianceCheck', 'description': 'This program checks This program checks 3x3 covariance matrices if they are invertible or not and removes the invalid epochs.', 'config_table': 'outputfileCovariance3d filename inputfileCovariance3d filename', 'display_text': 'This program checks inputfileCovariance3d 3x3 covariance matrices if they are invertible or not and removes the invalid epochs.'},
'InstrumentDetrend': { 'name': 'InstrumentDetrend', 'key': 'InstrumentDetrend', 'description': 'Reduces parametrizationTemporal (e.g. const, trend, polynomial) per arc from selected data columns of inputfileInstrument using a robust least squares adjustment . The outputfileTimeSeriesArcParameters contains for every arc one (mid) epoch with the estimated parameters. The order is: first all data ( data0 , data1 , ) of first temporal parameter, followed by all data of the second temporal parameter and so on.', 'config_table': 'outputfileInstrument filename detrended instrument time series outputfileTimeSeriesArcParameters filename time series of estimated parameters per arc inputfileInstrument filename parametrizationTemporal parametrizationTemporalType per arc, data is reduced by temporal representation startDataFields uint start countDataFields uint number of data fields (default: all after start) huber double for robust least squares huberPower double for robust least squares huberMaxIteration uint (maximum) number of iterations for robust estimation', 'display_text': 'Reduces parametrizationTemporal (e.g. const, trend, polynomial) per arc from selected data columns of inputfileInstrument using a robust least squares adjustment.
The outputfileTimeSeriesArcParameters contains for every arc one (mid) epoch with the estimated parameters. The order is: first all data (data0, data1, ) of first temporal parameter, followed by all data of the second temporal parameter and so on.'},
'InstrumentEarthRotation': { 'name': 'InstrumentEarthRotation', 'key': 'InstrumentEarthRotation', 'description': 'Precompute Earth rotation matrix from celestial to terrestrial frame and save as StarCamera file .', 'config_table': 'outputfileStarCamera filename rotation from CRF to TRF earthRotation earthRotationType timeSeries timeSeriesType', 'display_text': 'Precompute Earth rotation matrix from celestial to terrestrial frame and save as StarCamera file.'},
'InstrumentEstimateEmpiricalCovariance': { 'name': 'InstrumentEstimateEmpiricalCovariance', 'key': 'InstrumentEstimateEmpiricalCovariance', 'description': 'This program estimates the empirical auto- and cross-covariance of selected data columns per arc of inputfileInstrument. The maximum computed lag is determined by the number of outputfileCovarianceMatrix specified (for a single output file only the auto-covariance is determined, for two output files auto- and cross-covariance is computed and so on). Stationarity is assumed for the input time series, which means the temporal covariance matrix has Toeplitz structure. The matrix for lag h describes the covariance between x(t-h) and x(t), i.e. Sigma(t-h, t). To get a reliable estimate, InstrumentDetrend should be called first.', 'config_table': 'outputfileCovarianceMatrix filename inputfileInstrument filename startDataFields uint start countDataFields uint number of data fields (default: all after start)', 'display_text': 'This program estimates the empirical auto- and cross-covariance of selected data columns per arc of inputfileInstrument. The maximum computed lag is determined by the number of outputfileCovarianceMatrix specified (for a single output file only the auto-covariance is determined, for two output files auto- and cross-covariance is computed and so on).
Stationarity is assumed for the input time series, which means the temporal covariance matrix has Toeplitz structure. \\[ \\begin{bmatrix} \\Sigma & \\Sigma_{\\Delta_1} & \\Sigma_{\\Delta_2} & \\Sigma_{\\Delta_3} & \\Sigma_{\\Delta_4} \\\\ & \\Sigma & \\Sigma_{\\Delta_1} & \\Sigma_{\\Delta_2} & \\Sigma_{\\Delta_3} \\\\ & & \\Sigma & \\Sigma_{\\Delta_1} & \\Sigma_{\\Delta_2} \\\\ & & & \\Sigma & \\Sigma_{\\Delta_1} \\\\ & & & & \\Sigma \\\\ \\end{bmatrix} \\] The matrix for lag $h$ describes the covariance between $x_{t-h}$ and $x_{t}$, i.e. $\\Sigma(t-h, t)$.
To get a reliable estimate, InstrumentDetrend should be called first.'},
'InstrumentEstimateHelmertTransformation': { 'name': 'InstrumentEstimateHelmertTransformation', 'key': 'InstrumentEstimateHelmertTransformation', 'description': 'This program estimates a 3D Helmert transformation between two networks (frame realizations, e.g. GNSS satellite or station network). Each separate data represents a satellite/station/... (e.g. 32 GPS satellites). The instrument data (x,y,z position) considered can be set with startData. The Helmert parameters are set up according to parametrizationTemporal for each timeIntervals and are estimated using a robust least squares adjustment.', 'config_table': 'outputfileHelmertTimeSeries filename columns: mjd, Tx,Ty,Tz,s,Rx,Ry,Rz according to temporal parametrization data sequence e.g. satellite, station outputfileInstrument filename transformed positions as instrument type Vector3d outputfileInstrumentDiff filename position difference as instrument type Vector3d inputfileInstrument filename inputfileInstrumentReference filename startDataFields uint start index of position (x,y,z) columns timeIntervals timeSeriesType parameters are estimated per interval parametrizationTemporal parametrizationTemporalType temporal parametrization estimateShift boolean coordinate center estimateScale boolean scale factor of position estimateRotation boolean rotation huber double for robust least squares huberPower double for robust least squares huberMaxIteration uint (maximum) number of iterations for robust estimation', 'display_text': 'This program estimates a 3D Helmert transformation between two networks (frame realizations, e.g. GNSS satellite or station network). Each separate data represents a satellite/station/(e.g. 32 GPS satellites). 
The instrument data (x,y,z position) considered can be set with startData. The Helmert parameters are set up according to parametrizationTemporal for each timeIntervals and are estimated using a robust least squares adjustment.'},
'InstrumentFilter': { 'name': 'InstrumentFilter', 'key': 'InstrumentFilter', 'description': 'This program filter selected data columns of inputfileInstrument with digitalFilter arc wise.', 'config_table': 'outputfileInstrument filename inputfileInstrument filename digitalFilter digitalFilterType startDataFields uint start countDataFields uint number of data fields (default: all after start)', 'display_text': 'This program filter selected data columns of inputfileInstrument with digitalFilter arc wise.'},
'InstrumentInsertNAN': { 'name': 'InstrumentInsertNAN', 'key': 'InstrumentInsertNAN', 'description': 'This program inserts NAN epochs into inputfileInstrument files, either at specific times or where gaps in the instrument are detected.', 'config_table': 'outputfileInstrument filename inputfileInstrument filename times timeSeriesType Insert NAN at specific times. atGaps boolean Insert NAN where epochs are more than 1.5 times the median sampling apart. atArcEnds boolean Insert one epoch with data NAN at arc ends', 'display_text': 'This program inserts NAN epochs into inputfileInstrument files, either at specific times or where gaps in the instrument are detected.'},
'InstrumentMultiplyAdd': { 'name': 'InstrumentMultiplyAdd', 'key': 'InstrumentMultiplyAdd', 'description': 'This program multiply instrument data with a factor and add them together. Afterwards the mean of each arc and data column can be removed with removeArcMean . The instrument files must be synchronized ( InstrumentSynchronize ). See also InstrumentArcCalculate .', 'config_table': 'outputfileInstrument filename instrument sequence inputfileInstrument filename factor double removeArcMean boolean remove mean value of each arc', 'display_text': 'This program multiply instrument data with a factor and add them together. Afterwards the mean of each arc and data column can be removed with removeArcMean. The instrument files must be synchronized (InstrumentSynchronize).
See also InstrumentArcCalculate.'},
'InstrumentReduceSampling': { 'name': 'InstrumentReduceSampling', 'key': 'InstrumentReduceSampling', 'description': 'This program reduce the sampling of a instrument file. Only epochs with a time stamp with a division by sampling without remainder are kept (inside margin ).', 'config_table': 'outputfileInstrument filename inputfileInstrument filename sampling double new sampling in seconds margin double margin around the new sampling in seconds relative2FirstEpoch boolean compute sampling relative to time of first epoch', 'display_text': 'This program reduce the sampling of a instrument file. Only epochs with a time stamp with a division by sampling without remainder are kept (inside margin).'},
'InstrumentRemoveEpochsByCriteria': { 'name': 'InstrumentRemoveEpochsByCriteria', 'key': 'InstrumentRemoveEpochsByCriteria', 'description': 'This program removes epochs from inputfileInstrument by evaluating a set of removalCriteria expressions. For the data columns the standard data variables are available, see dataVariables. The instrument data can be reduced by data from inputfileInstrumentReference prior to evaluation of the expressions. To reduce the data by its median, use an expression like data1-data1mean. To remove epochs that deviate by more than 3 sigma use abs(data1)>3*data1std or abs(data0-data0median)>3*1.4826*data0mad. All arcs in the input instrument file are concatenated, meaning expressions like data1mean refer to the complete dataset. The removed epochs can be saved in a separate outputfileInstrumentRemovedEpochs.', 'config_table': 'outputfileInstrument filename all data is stored in one arc outputfileInstrumentRemovedEpochs filename all data is stored in one arc inputfileInstrument filename arcs are concatenated for processing inputfileInstrumentReference filename if given, the reference data is reduced prior to the expressions being evaluated removalCriteria expression epochs are removed if one criterion evaluates true. data0 is the first data field. margin double remove data around identified epochs (on both sides) [seconds]', 'display_text': 'This program removes epochs from inputfileInstrument by evaluating a set of removalCriteria expressions. For the data columns the standard data variables are available, see dataVariables.
The instrument data can be reduced by data from inputfileInstrumentReference prior to evaluation of the expressions.
To reduce the data by its median, use an expression like data1-data1mean. To remove epochs that deviate by more than 3 sigma use abs(data1)>3*data1std or abs(data0-data0median)>3*1.4826*data0mad.
All arcs in the input instrument file are concatenated, meaning expressions like data1mean refer to the complete dataset. The removed epochs can be saved in a separate outputfileInstrumentRemovedEpochs.'},
'InstrumentRemoveEpochsByTimes': { 'name': 'InstrumentRemoveEpochsByTimes', 'key': 'InstrumentRemoveEpochsByTimes', 'description': 'This program compares an instrument file with a time series. Epochs contained within the time series (including a defined margin) are removed from the instrument file. The margin is added on both sides of the epochs. The arcs of the instrument file are concatenated to one arc. The removed epochs can be saved in a separate instrument file.', 'config_table': 'outputfileInstrument filename all epochs are concatenated in one arc outputfileInstrumentRemovedEpochs filename all epochs are concatenated in one arc inputfileInstrument filename timePoints timeSeriesType margin double margin size (on both sides) [seconds]', 'display_text': 'This program compares an instrument file with a time series. Epochs contained within the time series (including a defined margin) are removed from the instrument file. The margin is added on both sides of the epochs. The arcs of the instrument file are concatenated to one arc. The removed epochs can be saved in a separate instrument file.'},
'InstrumentRemoveEpochsThruster': { 'name': 'InstrumentRemoveEpochsThruster', 'key': 'InstrumentRemoveEpochsThruster', 'description': 'This program remove epochs from an instrument file . The epochs are defined by a thruster file plus a defined margin before and after the thruster firings. The arcs of the instrument file are concatenated to one arc. The removed epochs can be saved in a separate instrument file.', 'config_table': 'outputfileInstrument filename all epochs are concatenated in one arc outputfileInstrumentRemovedEpochs filename all epochs are concatenated in one arc inputfileInstrument filename inputfileThruster filename THRUSTER marginBefore double margin before start of firing [seconds] marginAfter double margin after end of firing [seconds]', 'display_text': 'This program remove epochs from an instrument file. The epochs are defined by a thruster file plus a defined margin before and after the thruster firings. The arcs of the instrument file are concatenated to one arc. The removed epochs can be saved in a separate instrument file.'},
'InstrumentResample': { 'name': 'InstrumentResample', 'key': 'InstrumentResample', 'description': 'This program resamples instrument data to a given timeSeries using a resampling method. This program can also be used to reduce the sampling of an instrument file, but a better way to reduce the sampling of noisy data with regular sampling is to use a low pass filter first with InstrumentFilter and then thin out the data with InstrumentReduceSampling.', 'config_table': 'outputfileInstrument filename inputfileInstrument filename method interpolatorTimeSeriesType resampling method timeSeries timeSeriesType resampled points in time', 'display_text': 'This program resamples instrument data to a given timeSeries using a resampling method.
This program can also be used to reduce the sampling of an instrument file, but a better way to reduce the sampling of noisy data with regular sampling is to use a low pass filter first with InstrumentFilter and then thin out the data with InstrumentReduceSampling.'},
'InstrumentRotate': { 'name': 'InstrumentRotate', 'key': 'InstrumentRotate', 'description': 'This program rotates instrument data into a new reference frame (using inputfileStarCamera). The rotation is usually done from satellite frame into inertial frame. To apply Earth rotation to orbits use Orbit2EarthFixedOrbit. For other instrument data use InstrumentEarthRotation before.', 'config_table': 'outputfileInstrument filename inputfileInstrument filename inputfileStarCamera filename inverseRotate boolean', 'display_text': 'This program rotates instrument data into a new reference frame (using inputfileStarCamera). The rotation is usually done from satellite frame into inertial frame.
To apply Earth rotation to orbits use Orbit2EarthFixedOrbit For other instrument data use InstrumentEarthRotation before.'},
'InstrumentSetType': { 'name': 'InstrumentSetType', 'key': 'InstrumentSetType', 'description': 'Convert instrument data into instrument data with new type. The selected number of data columns must agree with the type.', 'config_table': 'outputfileInstrument filename inputfileInstrument filename type instrumentTypeType startDataFields uint start countDataFields uint number of data fields (default: all after start)', 'display_text': 'Convert instrument data into instrument data with new type. The selected number of data columns must agree with the type.'},
'InstrumentStarCamera2AccAngularRate': { 'name': 'InstrumentStarCamera2AccAngularRate', 'key': 'InstrumentStarCamera2AccAngularRate', 'description': 'This program derivate from a time series of quaternions a series of angular rates and angular accelerations. The derivatives are computed by a polynomial interpolation with interpolationDegree of the quaternions.', 'config_table': 'outputfileAngularRate filename [rad/s], VECTOR3D outputfileAngularAcc filename [rad/s**2], VECTOR3D inputfileStarCamera filename interpolationDegree uint derivation by polynomial interpolation of degree n', 'display_text': 'This program derivate from a time series of quaternions a series of angular rates and angular accelerations. The derivatives are computed by a polynomial interpolation with interpolationDegree of the quaternions.'},
'InstrumentStarCamera2RollPitchYaw': { 'name': 'InstrumentStarCamera2RollPitchYaw', 'key': 'InstrumentStarCamera2RollPitchYaw', 'description': 'Compute roll, pitch, yaw angles from inputfileStarCamera data. Optional the angles are computed relative to a inputfileStarCameraReference. See also SimulateStarCamera.', 'config_table': 'outputfileInstrument filename roll, pitch, yaw [rad], VECTOR3D inputfileStarCamera filename inputfileStarCameraReference filename nominal orientation', 'display_text': 'Compute roll, pitch, yaw angles from inputfileStarCamera data. Optional the angles are computed relative to a inputfileStarCameraReference.
See also SimulateStarCamera.'},
'InstrumentStarCamera2RotaryMatrix': { 'name': 'InstrumentStarCamera2RotaryMatrix', 'key': 'InstrumentStarCamera2RotaryMatrix', 'description': 'Write inputfileStarCamera rotations as outputfileInstrument rotary matrices (for each epoch xx, xy, xz, yx, yy, yz, zx, zy, zz).', 'config_table': 'outputfileInstrument filename xx, xy, xz, yx, yy, yz, zx, zy, zz (MISCVALUES) inputfileStarCamera filename', 'display_text': 'Write inputfileStarCamera rotations as outputfileInstrument rotary matrices (for each epoch $xx, xy, xz, yx, yy, yz, zx, zy, zz$).'},
'InstrumentStarCameraMultiply': { 'name': 'InstrumentStarCameraMultiply', 'key': 'InstrumentStarCameraMultiply', 'description': 'This program applies several rotations given by inputfileStarCamera. The resulting rotation is written as outputfileStarCamera. All instrument files must be synchronized (InstrumentSynchronize).', 'config_table': 'outputfileStarCamera filename instrument sequence inputfileStarCamera filename inverse boolean', 'display_text': 'This program applies several rotations given by inputfileStarCamera. The resulting rotation is written as outputfileStarCamera. All instrument files must be synchronized (InstrumentSynchronize).'},
'InstrumentStatisticsTimeSeries': { 'name': 'InstrumentStatisticsTimeSeries', 'key': 'InstrumentStatisticsTimeSeries', 'description': 'This program computes a time series of statistics for one or more instrument files. Possible statistics are root mean square, standard deviation, mean, median, min, and max. The columns of the output time series are defined either as one per inputfileInstrument or, if perColumn is true, statistics are computed per column for each file. Providing e.g. 32 orbit files of GPS satellites results in a time series matrix with columns: mjd, statisticsG01, statisticsG02, ..., statisticsG32. If intervals are provided, the input data is split into these intervals and one statistic is computed per interval. Otherwise, overall statistics are computed. The instrument data considered for computation of the component-wise statistics can be set with startDataFields and countDataFields. The factor can be set to e.g. sqrt(3) to get 3D instead of 1D RMS values. See also InstrumentArcStatistics, InstrumentArcCrossStatistics.', 'config_table': 'outputfileStatisticsTimeSeries filename columns: mjd, statistics column(s) per instrument file inputfileInstrument filename statistics choice rootMeanSquare standardDeviation mean median sum min max epochCount startDataFields uint start countDataFields uint number of data fields (default: all) perColumn boolean compute statistic per column ignoreNan boolean ignore NaN values in statistic computation intervals timeSeriesType intervals for statistics computation (one statistic per interval) factor double e.g. sqrt(3) for 3D RMS', 'display_text': 'This program computes a time series of statistics for one or more instrument files. 
Possible statistics are root mean square, standard deviation, mean, median, min, and max. The columns of the output time series are defined either as one per inputfileInstrument or, if perColumn is true, statistics are computed per column for each file. Providing e.g. 32 orbit files of GPS satellites results in a time series matrix with columns: mjd, statisticsG01, statisticsG02, ..., statisticsG32. If intervals are provided, the input data is split into these intervals and one statistic is computed per interval. Otherwise, overall statistics are computed. The instrument data considered for computation of the component-wise statistics can be set with startDataFields and countDataFields. The factor can be set to e.g. sqrt(3) to get 3D instead of 1D RMS values.
See also InstrumentArcStatistics, InstrumentArcCrossStatistics.'},
'InstrumentSynchronize': { 'name': 'InstrumentSynchronize', 'key': 'InstrumentSynchronize', 'description': 'This program reads several instrument files and synchronize the data. Every epoch with some missing data will be deleted so the remaining epochs have data from every instrument. In a second step the epochs are divided into arcs with maximal epochs (or maxArcLen) without having a gap inside an arc. A Gap is defined by a time step with at least minGap seconds between consecutive epochs or if not set the 1.5 of the median sampling. Arc with an epoch count less than minArcLen will be rejected. A specific region can be selected with border. In this case one of the instrument data must be an orbit. If timeIntervals is given the data are also divided into time bins. The assignment of arcs to the bins can be saved in outputfileArcList. This file can be used for the variational equation approach or KalmanBuildNormals. Instrument files from irregularData are not synchronized but divided into the same number of arcs within the same time intervals. 
Data outside the defined arcs will be deleted.', 'config_table': 'data sequence outputfileInstrument filename inputfileInstrument filename margin double margin for identical times [seconds] minGap double minimal time to define a gap and to begin a new arc, 0: no dividing [seconds], if not set 1.5*median sampling is used minArcLength uint minimal number of epochs of an arc maxArcLength uint maximal number of epochs of an arc arcType choice all arcs or only ascending or descending arcs are selected ascending descending border borderType only data in a specific region is selected timeIntervals timeSeriesType divide data into time bins outputfileArcList filename arc and time bin mapping irregularData sequence instrument files with irregular sampling outputfileInstrument filename inputfileInstrument filename minArcLength uint minimal number of epochs in an arc', 'display_text': 'This program reads several instrument files and synchronize the data. Every epoch with some missing data will be deleted so the remaining epochs have data from every instrument.
In a second step the epochs are divided into arcs with maximal epochs (or maxArcLen) without having a gap inside an arc. A Gap is defined by a time step with at least minGap seconds between consecutive epochs or if not set the 1.5 of the median sampling. Arc with an epoch count less than minArcLen will be rejected.
A specific region can be selected with border. In this case one of the instrument data must be an orbit.
If timeIntervals is given the data are also divided into time bins. The assignment of arcs to the bins can be saved in outputfileArcList. This file can be used for the variational equation approach or KalmanBuildNormals.
Instrument files from irregularData are not synchronized but divided into the same number of arcs within the same time intervals. Data outside the defined arcs will be deleted.'},
'InstrumentWaveletDecomposition': { 'name': 'InstrumentWaveletDecomposition', 'key': 'InstrumentWaveletDecomposition', 'description': 'This program performs a multilevel one-dimensional wavelet analysis on one selectDataField data column of inputfileInstrument. The outputfileInstrument contains the decomposed levels in time domain a_J,d_J,...,d_1.', 'config_table': 'outputfileInstrument filename MISCVALUES, decomposed levels in time domain a_J,d_J,...,d_1 inputfileInstrument filename selectDataField uint select a data column for decomposition inputfileWavelet filename wavelet coefficients level uint level of decomposition', 'display_text': 'This program performs a multilevel one-dimensional wavelet analysis on one selectDataField data column of inputfileInstrument. The outputfileInstrument contains the decomposed levels in time domain ${a_J,d_J,...,d_1}$'},
'LocalLevelFrame2StarCamera': { 'name': 'LocalLevelFrame2StarCamera', 'key': 'LocalLevelFrame2StarCamera', 'description': 'Compute rotation (StarCamera file) from local level frame (ellipsoidal north, east, down) to TRF for positions given in inputfileInstrument (first 3 data columns).', 'config_table': 'outputfileStarCamera filename rotation matrix from local level frame (ellipsoidal north, east, down) to TRF inputfileInstrument filename origin of local level frame constantOriginPerArc boolean use constant origin for all epochs of an arc (median position) R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates, 0: spherical coordinates', 'display_text': 'Compute rotation (StarCamera file) from local level frame (ellipsoidal north, east, down) to TRF for positions given in inputfileInstrument (first 3 data columns).'},
'KalmanBuildNormals': { 'name': 'KalmanBuildNormals', 'key': 'KalmanBuildNormals', 'description': 'This program sets up normal equations based on observation for short-term gravity field variations. It computes the normal equations based on the intervals given in the arcList. It sets up the least squares adjustment and subsequently computes the normal equations for each interval. If eliminateNonGravityParameters is true, all non-gravity parameters are eliminated before the normals are written to outputfileNormalEquation. For each time interval in arcList a single normal equation file is written. This program computes the input normals for KalmanFilter and KalmanSmootherLeastSquares.', 'config_table': 'outputfileNormalEquation filename outputfile for normal equations observation observationType inputfileArcList filename list to correspond points of time to arc numbers eliminateNonGravityParameters boolean eliminate additional parameters from normals, 0: all parameter are saved', 'display_text': 'This program sets up normal equations based on observation for short-term gravity field variations. It computes the normal equations based on the intervals $i \\in \\{1, ..., N\\}$ given in the arcList. It sets up the least squares adjustment \\[ \\begin{bmatrix} \\mathbf{l}_1 \\\\ \\mathbf{l}_2 \\\\ \\vdots \\\\ \\mathbf{l}_N \\\\ \\end{bmatrix} = \\begin{bmatrix} \\mathbf{A}_1 & & & \\\\ & \\mathbf{A}_2 & &\\\\ & & \\ddots & \\\\ & & & \\mathbf{A}_N \\\\ \\end{bmatrix} \\begin{bmatrix} \\mathbf{x}^{(1)} \\\\ \\mathbf{x}^{(2)} \\\\ \\vdots \\\\ \\mathbf{x}^{(N)} \\\\ \\end{bmatrix} + \\begin{bmatrix} \\mathbf{e}_1 \\\\ \\mathbf{e}_2 \\\\ \\vdots \\\\ \\mathbf{e}_N \\\\ \\end{bmatrix}, \\]and subsequently computes the normal equations $\\mathbf{N}_i, \\mathbf{n}_i$ for each interval. 
If eliminateNonGravityParameters is true, all non-gravity parameters are eliminated before the normals are written to outputfileNormalEquation. For each time interval in arcList a single normal equation file is written.
This program computes the input normals for KalmanFilter and KalmanSmootherLeastSquares.'},
'KalmanFilter': { 'name': 'KalmanFilter', 'key': 'KalmanFilter', 'description': 'The program computes time variable gravity fields using the Kalman filter approach of Kurtenbach, E., Eicker, A., Mayer-Gürr, T., Holschneider, M., Hayn, M., Fuhrmann, M., and Kusche, J. (2012). Improved daily GRACE gravity field solutions using a Kalman smoother. Journal of Geodynamics, 59–60, 39–48. https://doi.org/10.1016/j.jog.2012.02.006. The updated state is determined by solving a least squares adjustment. In normal equation form this can be written using the predicted state and its covariance matrix. The process dynamic is represented as an autoregressive model, and passed to the program through inputfileAutoregressiveModel. The sequence of normal equations are given as list of inputfileNormalEquations, which can be generated using loops. In the same way, the matrix files for outputfileUpdatedState and inputfileUpdatedStateCovariance can also be specified using loops. If no inputfileInitialState is set, a zero vector with appropriate dimensions is used. The inputfileInitialStateCovarianceMatrix however must be given. 
See also KalmanBuildNormals, KalmanSmoother.', 'config_table': 'outputfileUpdatedState filename estimated state x+ (nx1-matrix) outputfileUpdatedStateCovarianceMatrix filename estimated state\' s covariance matrix Cov(x+) inputfileNormalEquations filename normal equations input file inputfileInitialState filename initial state x0 inputfileInitialStateCovarianceMatrix filename initial state\'s covariance matrix Cov(x0) inputfileAutoregressiveModel filename file name of autoregressive model', 'display_text': 'The program computes time variable gravity fields using the Kalman filter approach of
Kurtenbach, E., Eicker, A., Mayer-Gürr, T., Holschneider, M., Hayn, M., Fuhrmann, M., and Kusche, J. (2012). Improved daily GRACE gravity field solutions using a Kalman smoother. Journal of Geodynamics, 59–60, 39–48. https://doi.org/10.1016/j.jog.2012.02.006.
The updated state $\\mathbf{x}_t^+$ is determined by solving the least squares adjustment \\[ \\mathbf{l}_t = \\mathbf{A}_t \\mathbf{x}_t + \\mathbf{e}_t \\hspace{25pt} \\mathbf{e}_t \\sim \\mathcal{N}(0, \\mathbf{R}_t)\\\\ \\mathbf{B} \\mathbf{x}^+_{t-1} = \\mathbf{I} \\mathbf{x}_t + \\mathbf{v}_t\\hspace{25pt} \\mathbf{v} \\sim \\mathcal{N}(0,\\mathbf{Q} + \\mathbf{B} \\mathbf{P}^+_{t-1}\\mathbf{B}^T). \\]In normal equation form this can be written as \\[ \\hat{\\mathbf{x}}_t = \\mathbf{x}^+_t = (\\mathbf{N}_t + \\mathbf{P}^{-^{-1}}_t)^{-1}(\\mathbf{n}_t + \\mathbf{P}^{-^{-1}}_t \\mathbf{x}^-_t), \\]where $\\mathbf{x}_t^- = \\mathbf{B} \\mathbf{x}^+_{t-1}$ and $\\mathbf{P}_t^{-} = \\mathbf{Q} + \\mathbf{B} \\mathbf{P}^+_{t-1}\\mathbf{B}^T$ are the predicted state and its covariance matrix.
The process dynamic $\\mathbf{B}, \\mathbf{Q}$ is represented as an autoregressive model, and passed to the program through inputfileAutoregressiveModel. The sequence of normal equations $\\mathbf{N}_t, \\mathbf{n}_t$ are given as list of inputfileNormalEquations, which can be generated using loops. In the same way, the matrix files for outputfileUpdatedState and inputfileUpdatedStateCovariance can also be specified using loops.
See also KalmanBuildNormals, KalmanSmoother.'},
'KalmanSmoother': { 'name': 'KalmanSmoother', 'key': 'KalmanSmoother', 'description': 'Apply the Rauch-Tung-Striebel smoother to a gravity field time series computed by KalmanFilter. This is the implementation of the approach presented in Kurtenbach, E., Eicker, A., Mayer-Gürr, T., Holschneider, M., Hayn, M., Fuhrmann, M., and Kusche, J. (2012). Improved daily GRACE gravity field solutions using a Kalman smoother. Journal of Geodynamics, 59–60, 39–48. https://doi.org/10.1016/j.jog.2012.02.006. The result has zero phase and the squared magnitude response of inputfileAutoregressiveModel (see autoregressiveModel for details). inputfileUpdatedState and inputfileUpdatedStateCovarianceMatrix are the output of a KalmanFilter forward sweep. The matrix files for outputfileState, inputfileUpdatedState and inputfileUpdatedStateCovarianceMatrix can also be specified using loops. See also KalmanBuildNormals, KalmanFilter and KalmanSmootherLeastSquares.', 'config_table': 'outputfileState filename estimated parameters (nx1-matrix) outputfileStateCovarianceMatrix filename estimated parameters\' covariance matrix inputfileUpdatedState filename inputfileUpdatedStateCovarianceMatrix filename inputfileAutoregressiveModel filename file name of autoregressive model', 'display_text': 'Apply the Rauch-Tung-Striebel smoother to a gravity field time series computed by KalmanFilter. This is the implementation of the approach presented in
Kurtenbach, E., Eicker, A., Mayer-Gürr, T., Holschneider, M., Hayn, M., Fuhrmann, M., and Kusche, J. (2012). Improved daily GRACE gravity field solutions using a Kalman smoother. Journal of Geodynamics, 59–60, 39–48. https://doi.org/10.1016/j.jog.2012.02.006.
See also KalmanBuildNormals, KalmanFilter and KalmanSmootherLeastSquares.'},
'KalmanSmootherLeastSquares': { 'name': 'KalmanSmootherLeastSquares', 'key': 'KalmanSmootherLeastSquares', 'description': 'This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a which represent a stationary random process (see the autoregressive model description ) for details. The output files for the estimated gravity field ( This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a ), the corresponding standard deviations ( This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a ) and the full covariance matrix ( This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a ) can be specified using This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a . Similarly, the This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a can also be specified using This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a . 
See also KalmanBuildNormals , KalmanFilter and KalmanSmoother', 'config_table': 'outputfileSolution filename file name of solution vector (use time tags) outputfileSigmax filename file name of sigma vector (use time tags) outputfileCovariance filename file name of full covariance matrix (use time tags) inputfileNormalEquations filename input normal equations (loopTime will be expanded) autoregressiveModelSequence autoregressiveModelSequenceType file containing AR model for spatiotemporal constraint', 'display_text': 'This program estimates temporal gravity field variations with a constraint least squares adjustment. Prior information is introduced by means of a autoregressiveModelSequence which represent a stationary random process (see the autoregressive model description) for details.
See also KalmanBuildNormals, KalmanFilter and KalmanSmoother.'},
'DigitalFilter2FrequencyResponse': { 'name': 'DigitalFilter2FrequencyResponse', 'key': 'DigitalFilter2FrequencyResponse', 'description': 'Compute amplitude-, phase-, group delay and frequency response of a Compute amplitude-, phase-, group delay and frequency response of a cascade. The Compute amplitude-, phase-, group delay and frequency response of a is a matrix with following columns: freq , ampl, phase , group delay , real, imag. When unwrapPhase is set to true, jumps of the phase response are removed before writing the output to file. The response of the filter cascade is given by the product of each individual frequency response: Amplitude and phase response are computed from the frequency response via The group delay is computed by numerically differentiating the phase response The frequency vector for a length and a sampling is given by See also DigitalFilter2ImpulseResponse .', 'config_table': 'outputfileResponse filename columns: freq [Hz], ampl, phase [rad], group delay [-], real, imag digitalFilter digitalFilterType length uint length of the data series in time domain sampling double sampling to determine frequency [seconds] skipZeroFrequency boolean omit zero frequency when writing to file unwrapPhase boolean unwrap phase response', 'display_text': 'Compute amplitude-, phase-, group delay and frequency response of a digitalFilter cascade. The outputfileResponse is a matrix with following columns: freq $[Hz]$, ampl, phase $[rad]$, group delay $[-]$, real, imag.
When unwrapPhase is set to true, $2\\pi$ jumps of the phase response are removed before writing the output to file.
The response of the filter cascade is given by the product of each individual frequency response: \\[ H(f) = \\prod_j H_j(f). \\]Amplitude and phase response are computed from the frequency response via \\[ A(f) = |H(f)| \\hspace{5pt}\\text{and}\\hspace{5pt} \\Phi(f) = \\arctan \\frac{\\mathcal{I}(H(f))}{\\mathcal{R}(H(f))}. \\]The group delay is computed by numerically differentiating the phase response \\[ \\tau_g(f_k) = \\frac{1}{2} \\left[\\frac{\\Phi(f_k) - \\Phi(f_{k-1})}{2\\pi(f_k-f_{k-1})} + \\frac{\\Phi(f_{k+1}) - \\Phi(f_{k})}{2\\pi(f_{k+1}-f_{k})}\\right] \\approx \\frac{d\\Phi}{df}\\frac{df}{d\\omega}. \\]The frequency vector for a length $N$ and a sampling $\\Delta t$ is given by \\[ f_k = \\frac{k}{N \\Delta t}, \\hspace{15pt} k \\in \\{0, \\dots, \\left\\lfloor\\frac{N+2}{2}\\right\\rfloor-1\\}. \\] See also DigitalFilter2ImpulseResponse.'},
'DigitalFilter2ImpulseResponse': { 'name': 'DigitalFilter2ImpulseResponse', 'key': 'DigitalFilter2ImpulseResponse', 'description': 'Impulse response of a Impulse response of a cascade. The impulse response is computed by filtering a sequence with length samples and a unit impulse at index pulseLag . The Impulse response of a is a matrix with the time stamp (zero at pulseLag ) in the first column and the impulse response in the second column. See also DigitalFilter2FrequencyResponse .', 'config_table': 'outputfileResponse filename columns: time [seconds], response digitalFilter digitalFilterType length uint length of the impulse response pulseLag uint start of the pulse in the data series sampling double [seconds]', 'display_text': 'Impulse response of a digitalFilter cascade. The impulse response is computed by filtering a sequence with length samples and a unit impulse at index pulseLag.
The outputfileResponse is a matrix with the time stamp (zero at pulseLag) in the first column and the impulse response $h_k$ in the second column.
See also DigitalFilter2FrequencyResponse.'},
'EarthOrientationParameterTimeSeries': { 'name': 'EarthOrientationParameterTimeSeries', 'key': 'EarthOrientationParameterTimeSeries', 'description': 'Computes a Computes a (GPS time) of Earth Orientation Parameter (EOP). The instrument file (MISCVALUES) contains the elements at each epoch in the following order: [rad] [rad] [rad] [seconds] length of day (LOD) [seconds] [rad] [rad] [rad] The values are in situ values with all corrections and models applied. The time series can be used to precompute Earth rotation with a low temporal resolution (e.g. 10 min) and reuse the file in Computes a to interpolate the data to the needed epochs (e.g. to rotate orbit data). As some Earth rotation models are quite slow this can accelerate the computation.', 'config_table': 'outputfileEOP filename each row: mjd(GPS), xp, yp, sp, dUT1, LOD, X, Y, S earthRotation earthRotationType timeSeries timeSeriesType', 'display_text': 'Computes a timeSeries (GPS time) of Earth Orientation Parameter (EOP). The instrument file (MISCVALUES) contains the elements at each epoch in the following order:
$x_p$ [rad]
$y_p$ [rad]
$s_p$ [rad]
$UT1-UTC$ [seconds]
length of day (LOD) [seconds]
$X$ [rad]
$Y$ [rad]
$S$ [rad]
The values are in situ values with all corrections and models applied. The time series can be used to precompute Earth rotation with a low temporal resolution (e.g. 10 min) and reuse the file in earthRotation:file to interpolate the data to the needed epochs (e.g. to rotate orbit data). As some Earth rotation models are quite slow this can accelerate the computation.'},
'EarthRotaryVectorTimeSeries': { 'name': 'EarthRotaryVectorTimeSeries', 'key': 'EarthRotaryVectorTimeSeries', 'description': 'Computes a Computes a of Earth\'s rotary axis and its temporal derivative at Computes a (GPS time). The instrument file (MISCVALUES) contains the elements at each epoch in the following order: .', 'config_table': 'outputfileTimeSeries filename wx, wy, wz [rad], dwx, dwy, dwz [rad/s^2] earthRotation earthRotationType timeSeries timeSeriesType inTRF boolean terrestrial reference frame, otherwise celestial', 'display_text': 'Computes a outputfileTimeSeries of Earth\'s rotary axis and its temporal derivative at timeSeries (GPS time). The instrument file (MISCVALUES) contains the elements at each epoch in the following order:
$\\omega_x [rad/s]$
$\\omega_y [rad/s]$
$\\omega_z [rad/s]$
$\\dot{\\omega}_x [rad/s^2]$
$\\dot{\\omega}_y [rad/s^2]$
$\\dot{\\omega}_z [rad/s^2]$.
'},
'EclipseFactor2GriddedData': { 'name': 'EclipseFactor2GriddedData', 'key': 'EclipseFactor2GriddedData', 'description': 'This program converts the output of a This program converts the output of a model on a given This program converts the output of a . The time for the evaluation can be specified in time . The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening .', 'config_table': 'outputfileGriddedData filename eclipse factor grid gridType eclipse eclipseType ephemerides ephemeridesType earthRotation earthRotationType time time R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program converts the output of a eclipse model on a given grid. The time for the evaluation can be specified in time. The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.'},
'FilterMatrixWindowedPotentialCoefficients': { 'name': 'FilterMatrixWindowedPotentialCoefficients', 'key': 'FilterMatrixWindowedPotentialCoefficients', 'description': 'Create a spherical harmonic window matrix. The window matrix is generated in space domain through spherical harmonic synthesis and analysis matrices. The resulting linear operator can be written as Here, is a diagonal matrix with the Create a spherical harmonic window matrix. The window matrix coefficients on the main diagonal, is the spherical harmonic synthesis matrix, is defined by the values in inputfileGriddedData and the expression value , is the spherical harmonic analysis matrix. The resulting window matrix is written to a matrix file. The spherical harmonic degree range, and coefficient numbering are defined by minDegree , maxDegree , and Create a spherical harmonic window matrix. The window matrix . Note that a proper window function should contain values in the range [0, 1]. The window function can feature a smooth transition between 0 and 1 to avoid ringing effects.', 'config_table': 'outputfileWindowMatrix filename inputfileGriddedData filename gridded data which defines the window function in space domain value expression expression to compute the window function (input columns are named data0, data1, ...) kernel kernelType kernel for windowing minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme for solution vector', 'display_text': 'Create a spherical harmonic window matrix. The window matrix $\\mathbf{W}$ is generated in space domain through spherical harmonic synthesis and analysis matrices. The resulting linear operator can be written as \\[ \\mathbf{W} = \\mathbf{K} \\mathbf{A} \\mathbf{\\Omega} \\mathbf{S} \\mathbf{K}^{-1}. 
\\]Here, $\\mathbf{K}$ is a diagonal matrix with the kernel coefficients on the main diagonal, $\\mathbf{S}$ is the spherical harmonic synthesis matrix, $\\mathbf{\\Omega}$ is defined by the values in inputfileGriddedData and the expression value, $\\mathbf{A}$ is the spherical harmonic analysis matrix. The resulting window matrix is written to a matrix file.
The spherical harmonic degree range, and coefficient numbering are defined by minDegree, maxDegree, and numbering.
Note that a proper window function $\\mathbf{\\Omega}$ should contain values in the range [0, 1]. The window function $\\mathbf{\\Omega}$ can feature a smooth transition between 0 and 1 to avoid ringing effects.'},
'FunctionsCalculate': { 'name': 'FunctionsCalculate', 'key': 'FunctionsCalculate', 'description': 'This program manipulates matrix files with data in columns. If several inputfile s are given the data columns are copied side by side. All inputfile s must contain the same number of rows. The columns are enumerated by data0 , data1 , . The content of This program manipulates is controlled by outColumn . The algorithm to compute the output is as follows: The expressions in outColumn are evaluated once for each row of the input. The variables data0 , data1 , are replaced by the according values from the input columns before. Additional variables are available, e.g. index , data0rms , see dataVariables . If no outColumn are specified all input columns are used instead directly. For a simplified handling constant s can be defined by name=value , e.g. annual=365.25 . It is also possible to estimate parameter s in a least squares adjustment. The leastSquares serves as template for observation equations for every row. The expression leastSquares is evaluated for each row in the inputfile . The variables data0 , data1 , are replaced by the according values from the input columns before. In the next step the parameters are estimated in order to minimize the expressions in leastSquares in the sense of least squares. Afterwards complete rows are removed if one of the removalCriteria expressions for this row evaluates true (not zero). An extra statistics file can be generated with one row of data. For the computation of the outColumn values all dataVariables are available (e.g. data3mean , data4std ) inclusively the constant s and estimated parameter s but without the data0 , data1 , itself. The variables and the numbering of the columns refers to the This program manipulates . First example: To calculate the mean of two values at each row set outColumn to 0.5*(data1+data0) . Second example: An input file contain a column with times and a column with values. 
To remove a trend from the values define the parameter s trend and bias . The observation equation in leastSquares is data1 - (trend*data0+bias) . For output you can define the following columns for example: outColumn = data0 : points in time. outColumn = data1 : the values itself. outColumn = trend*data0+bias : the linear fit. outColumn = data1-trend*data0-bias : the residuals. The extra statistics file could contain in this case: outColumn = data0max-data0min : time span. outColumn = bias : estimated parameter. outColumn = trend : estimated parameter. outColumn = data3rms : root mean square of the residuals. See also InstrumentArcCalculate , GriddedDataCalculate , MatrixCalculate .', 'config_table': 'outputfile filename inputfile filename constant expression define a constant by name=value parameter expression define a parameter by name[=value] leastSquares expression try to minimize the expression by adjustment of the parameters removalCriteria expression row is removed if one criterion evaluates true. outColumn expression expression to compute output columns (input columns are named data0, data1, ...) statistics sequence outputfile filename matrix with one row, columns are user defined outColumn expression expression to compute statistics columns, data* are the outputColumns', 'display_text': 'This program manipulates matrix files with data in columns. If several inputfiles are given the data columns are copied side by side. All inputfiles must contain the same number of rows. The columns are enumerated by data0, data1, .
The content of outputfile is controlled by outColumn. The algorithm to compute the output is as follows: The expressions in outColumn are evaluated once for each row of the input. The variables data0, data1, are replaced by the according values from the input columns before. Additional variables are available, e.g. index, data0rms, see dataVariables. If no outColumn are specified all input columns are used instead directly.
For a simplified handling constants can be defined by name=value, e.g. annual=365.25. It is also possible to estimate parameters in a least squares adjustment. The leastSquares serves as template for observation equations for every row. The expression leastSquares is evaluated for each row in the inputfile. The variables data0, data1, are replaced by the according values from the input columns before. In the next step the parameters are estimated in order to minimize the expressions in leastSquares in the sense of least squares.
Afterwards complete rows are removed if one of the removalCriteria expressions for this row evaluates true (not zero).
An extra statistics file can be generated with one row of data. For the computation of the outColumn values all dataVariables are available (e.g. data3mean, data4std) inclusively the constants and estimated parameters but without the data0, data1, itself. The variables and the numbering of the columns refers to the outputfile.
First example: To calculate the mean of two values at each row set outColumn to 0.5*(data1+data0).
Second example: An input file contains a column with times and a column with values. To remove a trend from the values define the parameters trend and bias. The observation equation in leastSquares is data1 - (trend*data0+bias). For output you can define the following columns for example:
outColumn=data0: points in time.
outColumn=data1: the values itself.
outColumn=trend*data0+bias: the linear fit.
outColumn=data1-trend*data0-bias: the residuals.
The extra statistics file could contain in this case:
outColumn=data0max-data0min: time span.
outColumn=bias: estimated parameter.
outColumn=trend: estimated parameter.
outColumn=data3rms: root mean square of the residuals.
See also InstrumentArcCalculate, GriddedDataCalculate, MatrixCalculate.'},
'Grs2PotentialCoefficients': { 'name': 'Grs2PotentialCoefficients', 'key': 'Grs2PotentialCoefficients', 'description': 'This program creates potential coefficients from the defining constants of a Geodetic Reference System (GRS). The potential coefficients exclude the centrifugal part. The form of the reference ellipsoid is either determined by the dynamical form factor J2 , or the geometric inverseFlattening . One of those form parameters must be specified. The default values create the GRS80.', 'config_table': 'outputfilePotentialCoefficients filename maxDegree uint GM double Geocentric gravitational constant R double reference radius omega double Angular velocity of rotation J2 double Dynamical form factor inverseFlattening double Geometric inverse flattening of reference ellipsoid (0: sphere, ignored when J2 is set)', 'display_text': 'This program creates potential coefficients from the defining constants of a Geodetic Reference System (GRS). The potential coefficients exclude the centrifugal part. The form of the reference ellipsoid is either determined by the dynamical form factor J2, or the geometric inverseFlattening. One of those form parameters must be specified.
The default values create the GRS80.'},
'Kaula2SigmaPotentialCoefficients': { 'name': 'Kaula2SigmaPotentialCoefficients', 'key': 'Kaula2SigmaPotentialCoefficients', 'description': 'Create signal standard deviations of potential coefficients according to Kaula\'s rule of thumb with the degree , the factor , and the power . The standard deviations are written as formal errors of Create signal standard deviations of potential coefficients according to Kaula\'s rule of thumb .', 'config_table': 'outputfilePotentialCoefficients filename minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius power double sigma = factor/degree^power factor double sigma = factor/degree^power', 'display_text': 'Create signal standard deviations of potential coefficients according to Kaula\'s rule of thumb \\[ \\sigma_n = \\frac{f}{n^p}, \\]with the degree $n$, the factor $f$, and the power $p$.
The standard deviations are written as formal errors of outputfilePotentialCoefficients.'},
'Kernel2Coefficients': { 'name': 'Kernel2Coefficients', 'key': 'Kernel2Coefficients', 'description': 'This program computes and returns the coefficients and inverse coefficients of a This program computes and returns the coefficients and inverse coefficients of a from minDegree to maxDegree at a given height . The main purpose is for visualization with PlotGraph .', 'config_table': 'outputfileMatrix filename matrix with columns degree, coefficients and inverse coefficients kernel kernelType minDegree uint minimum degree of returned coefficients maxDegre uint compute coefficients up to maxDegree height double evaluate kernel at R+height [m] R double reference radius', 'display_text': 'This program computes and returns the coefficients and inverse coefficients of a kernel from minDegree to maxDegree at a given height.
The main purpose is for visualization with PlotGraph.'},
'Kernel2SigmaPotentialCoefficients': { 'name': 'Kernel2SigmaPotentialCoefficients', 'key': 'Kernel2SigmaPotentialCoefficients', 'description': 'Create variances of spherical harmonics by convolving a kernel with white noise, e.g. to display filter coefficients of a Gaussian filter. The coefficients are written as formal errors of Create variances of spherical harmonics by convolving a kernel with white noise, e.g. to display filter coefficients of a Gaussian filter. The coefficients are written as formal errors of .', 'config_table': 'outputfilePotentialCoefficients filename kernel kernelType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius factor double', 'display_text': 'Create variances of spherical harmonics by convolving a kernel with white noise, e.g. to display filter coefficients of a Gaussian filter. The coefficients are written as formal errors of outputfilePotentialCoefficients.'},
'KernelEvaluate': { 'name': 'KernelEvaluate', 'key': 'KernelEvaluate', 'description': 'Compute Compute values for distant angles. The main purpose is for visualization with PlotGraph .', 'config_table': 'outputfileMatrix filename matrix with first column the angle [degree], second the kernel value kernel kernelType minAngle angle [degree] maxAngle angle [degree] sampling angle [degree] height double evaluate at R+height [m] R double reference radius', 'display_text': 'Compute kernel values for distant angles. The main purpose is for visualization with PlotGraph.'},
'MagneticField2GriddedData': { 'name': 'MagneticField2GriddedData', 'key': 'MagneticField2GriddedData', 'description': 'Computes x, y, z of the magnetic field vector.', 'config_table': 'outputfileGriddedData filename x, y, z [Tesla = kg/A/s**2] magnetosphere magnetosphereType grid gridType time time localReferenceFrame boolean local left handed reference frame (north, east, up) R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'Computes x, y, z of the magnetic field vector.'},
'MatrixCalculate': { 'name': 'MatrixCalculate', 'key': 'MatrixCalculate', 'description': 'This program creates a matrix from multiple matrices. All This program creates a are summed up. The size of the resulting matrix is expanded to fit all matrices. The class This program creates a allows complex matrix operations before.', 'config_table': 'outputfileMatrix filename matrix matrixGeneratorType', 'display_text': 'This program creates a matrix from multiple matrices. All matrices are summed up. The size of the resulting matrix is expanded to fit all matrices. The class matrixGenerator allows complex matrix operations before.'},
'ObservationEquations2Files': { 'name': 'ObservationEquations2Files', 'key': 'ObservationEquations2Files', 'description': 'This program computes the linearized and decorrelated equation system for each arc : using class This program computes the linearized and decorrelated equation system for each arc and writes , and as matrix files.', 'config_table': 'outputfileObservationVector filename one file for each arc outputfileDesignMatrix filename one file for each arc, without arc related parameters outputfileDesignMatrixArc filename one file for each arc, arc related parameters variableArc string variable with arc number outputfileParameterNames filename without arc related parameters observation observationType', 'display_text': 'This program computes the linearized and decorrelated equation system for each arc $i$: \\[ \\M l_i = \\M A_i \\M x + \\M B_i \\M y_i + \\M e_i \\]using class observation and writes $\\M A_i$, $\\M B_i$ and $\\M l_i$ as matrix files.'},
'PlatformCreate': { 'name': 'PlatformCreate', 'key': 'PlatformCreate', 'description': 'Create a Platform file from scratch by defining attributes such as markerName , markerNumber , comment , approxPosition , equipment . See also GnssAntex2AntennaDefinition and GnssStationLog2Platform .', 'config_table': 'outputfilePlatform filename markerName string markerNumber string comment string approxPositionX double [m] in TRF approxPositionY double [m] in TRF approxPositionZ double [m] in TRF equipment choice gnssAntenna sequence name string serial string radome string comment string timeStart time timeEnd time positionX double [m] ARP in north, east, up or vehicle system positionY double [m] ARP in north, east, up or vehicle system positionZ double [m] ARP in north, east, up or vehicle system rotationX angle [degree] from local/vehicle to left-handed antenna system rotationY angle [degree] from local/vehicle to left-handed antenna system rotationZ angle [degree] from local/vehicle to left-handed antenna system flipX boolean flip x-axis (after rotation) flipY boolean flip y-axis (after rotation) flipZ boolean flip z-axis (after rotation) gnssReceiver sequence name string serial string version string comment string timeStart time timeEnd time laserRetroReflector sequence name string e.g. 
GFZ, ITE, IPIE serial string comment string timeStart time timeEnd time positionX double [m] optial reference point RP in satellite system positionY double [m] optial reference point RP in satellite system positionZ double [m] optial reference point RP in satellite system rotationX angle [degree] from local/vehicle to LRR system rotationY angle [degree] from local/vehicle to LRR system rotationZ angle [degree] from local/vehicle to LRR system flipX boolean flip x-axis (after rotation) flipY boolean flip y-axis (after rotation) flipZ boolean flip z-axis (after rotation) range double [m] range bias (only without range matrix) inputfileRangeMatrix filename [m] (azimuth(0..360) x zenith(0..dZenit*rows) dZenit angle [degree] increment of range matrix geodeticSatellite sequence e.g. LAGEOS name string serial string comment string timeStart time timeEnd time range double [m] standard center-of-mass correction slrStation sequence name string CDP SOD 8-digit No. serial string IERS DOMES comment string timeStart time timeEnd time positionX double [m] eccentricity in north positionY double [m] eccentricity in east positionZ double [m] eccentricity in up satelliteIdentifier sequence name string serial string cospar string Satellite COSPAR ID norad string Satellite Catalog (NORAD) Number sic string SIC Code sp3 string SP3 comment string timeStart time timeEnd time other sequence name string serial string comment string timeStart time timeEnd time positionX double [m] in north, east, up or vehicle system positionY double [m] in north, east, up or vehicle system positionZ double [m] in north, east, up or vehicle system referencePoint sequence e.g. 
center of mass in satellite frame comment string xStart double [m] in north, east, up or vehicle system yStart double linear motion between start and end zStart double xEnd double [m] in north, east, up or vehicle system yEnd double linear motion between start and end zEnd double timeStart time timeEnd time', 'display_text': 'Create a Platform file from scratch by defining attributes such as markerName, markerNumber, comment, approxPosition, equipment.
'},
'PotentialCoefficients2BlockMeanTimeSplines': { 'name': 'PotentialCoefficients2BlockMeanTimeSplines', 'key': 'PotentialCoefficients2BlockMeanTimeSplines', 'description': 'This program is a simplified version of Gravityfield2TimeSplines . It reads a series of potential coefficient files ( This program is a simplified version of ) and creates a time splines file with spline degree 0 (temporal block means) or degree 1 (linear splines). The time intervals in which the potential coefficients are valid are defined between adjacent points in time given by splineTimeSeries . Therefore one more point in time is needed than the number of potential coefficient files for degree 0. The coefficients can be filtered with This program is a simplified version of . If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM . This program is useful e.g. to combine monthly GRACE solutions to one file.', 'config_table': 'outputfileTimeSplines filename outputfileTimeSplinesCovariance filename only the variances are saved inputfilePotentialCoefficients filename filter sphericalHarmonicsFilterType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius removeMean boolean remove the temporal mean of the series before estimating the splines interpolate boolean interpolate missing files splineTimeSeries timeSeriesType input files must be between points in time splineDegree uint degree of splines', 'display_text': 'This program is a simplified version of Gravityfield2TimeSplines. It reads a series of potential coefficient files (inputfilePotentialCoefficients) and creates a time splines file with spline degree 0 (temporal block means) or degree 1 (linear splines). The time intervals in which the potential coefficients are valid are defined between adjacent points in time given by splineTimeSeries. 
Therefore one more point in time is needed than the number of potential coefficient files for degree 0.
The coefficients can be filtered with filter. If set the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
This program is useful e.g. to combine monthly GRACE solutions to one file.'},
'PotentialCoefficients2DegreeAmplitudes': { 'name': 'PotentialCoefficients2DegreeAmplitudes', 'key': 'PotentialCoefficients2DegreeAmplitudes', 'description': 'This program computes degree amplitudes from potentialCoefficients files and saves them to a matrix file. The coefficients can be filtered with This program computes degree amplitudes from and converted to different functionals with This program computes degree amplitudes from . The gravity field can be evaluated at different altitudes by specifying evaluationRadius . Polar regions can be excluded by setting polarGap . If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The coefficients are related to the reference radius R and the Earth gravitational constant GM . The This program computes degree amplitudes from contains in the first 3 columns the degree, the degree amplitude, and the formal errors. For each additional This program computes degree amplitudes from three columns are appended: the degree amplitude, the formal errors, and the difference to the first file. For example the data columns for 4 This program computes degree amplitudes from are degree= data0 PotentialCoefficients0: signal= data1 , error= data2 , PotentialCoefficients1: signal= data3 , error= data4 , difference= data5 , PotentialCoefficients2: signal= data6 , error= data7 , difference= data8 , PotentialCoefficients3: signal= data9 , error= data10 , difference= data11 . 
See also Gravityfield2DegreeAmplitudes .', 'config_table': 'outputfileMatrix filename matrix with degree, signal amplitude, formal error, differences inputfilePotentialCoefficients filename kernel kernelType filter sphericalHarmonicsFilterType filter the coefficients type choice type of variances rms degree amplitudes (square root of degree variances) accumulation cumulate variances over degrees evaluationRadius double evaluate the gravity field at this radius (default: evaluate at surface polarGap angle exclude polar regions (aperture angle in degrees) minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'This program computes degree amplitudes from potentialCoefficients files and saves them to a matrix file.
The coefficients can be filtered with filter and converted to different functionals with kernel. The gravity field can be evaluated at different altitudes by specifying evaluationRadius. Polar regions can be excluded by setting polarGap. If set, the expansion is limited in the range between minDegree and maxDegree inclusively. The coefficients are related to the reference radius R and the Earth gravitational constant GM.
The outputfileMatrix contains in the first 3 columns the degree, the degree amplitude, and the formal errors. For each additional inputfilePotentialCoefficients three columns are appended: the degree amplitude, the formal errors, and the difference to the first file.
See also Gravityfield2DegreeAmplitudes.'},
'RadialBasisSplines2KernelCoefficients': { 'name': 'RadialBasisSplines2KernelCoefficients', 'key': 'RadialBasisSplines2KernelCoefficients', 'description': 'This program calculates the coefficients of a This program calculates the coefficients according to from a given This program calculates the coefficients , with R and GM describing the reference radius and the geocentric constant, respectively. The stand for the gravity field accuracies (from degree minDegree to maxDegree ), if they are given. If no accuracies are provided, the represent the square root of the degree variances of the gravity field. If maxDegree exceeds the maximum degree given by This program calculates the coefficients , the higher degrees are complemented by Kaula\'s rule The output of the coefficients is given in the file This program calculates the coefficients .', 'config_table': 'outputfileCoefficients filename gravityfield gravityfieldType use sigmas, if not given use signal (cnm,snm), if not given use kaulas rule minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius kaulaPower double sigma = kaulaFactor/degree^kaulaPower kaulaFactor double sigma = kaulaFactor/degree^kaulaPower', 'display_text': 'This program calculates the coefficients $k_n$ of a kernel:coefficients according to \\[ k_n = \\frac{GM}{4\\pi R}\\frac{\\sigma_n}{\\sqrt{2n+1}}. \\]from a given gravityfield, with R and GM describing the reference radius and the geocentric constant, respectively. The $\\sigma_n$ stand for the gravity field accuracies (from degree minDegree to maxDegree), if they are given. If no accuracies are provided, the $\\sigma_n$ represent the square root of the degree variances of the gravity field. If maxDegree exceeds the maximum degree given by gravityfield, the higher degrees are complemented by Kaula\'s rule The output of the coefficients is given in the file outputfileCoefficients.'},
'SatelliteModelCreate': { 'name': 'SatelliteModelCreate', 'key': 'SatelliteModelCreate', 'description': 'This program creates a satellite macro model for the estimation of non-gravitational accelerations acting on a satellite. Mandatory input values are the satelliteName , mass , coefficientDrag and information about the satellite surfaces . For low Earth orbiting satellites, like GRACE for instance, a good guess for the drag coefficient could be 2.3. Apart from that, it is latter on possible to estimate a more precise variable drag coefficient (e.g. This program creates a satellite macro model for the estimation of non-gravitational accelerations acting on a satellite. Mandatory input values are the ), which will override this initial guess. Concerning the satellite surfaces an external file must be imported which must contain information about each single satellite plate in terms of plate area , the associated plate normal and re-radiation properties (reflexion, diffusion and absorption) properties in the visible and IR part. 
Examplarily, a description of the macro model for GRACE can be found under: https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/ProdSpecDoc_v4.6.pdf Additionally, it is possible to add further information like antennaThrust, solar panel, temporal mass changes and massInstrument using the modules option.', 'config_table': 'outputfileSatelliteModel filename satellite sequence satelliteName string mass double coefficientDrag double surfaces sequence inputfile filename each line must contain one surface element type expression 0: plate, 1: sphere, 2: cylinder area expression [m^2] normalX expression normalY expression normalZ expression reflexionVisible expression diffusionVisible expression absorptionVisible expression reflexionInfrared expression diffusionInfrared expression absorptionInfrared expression specificHeatCapacity expression 0: no thermal radiation, -1: direct reemission [Ws/K/m^2] module choice antennaThrust sequence thrustX double thrustY double thrustZ double solarPanel sequence rotationAxisX double rotationAxisY double rotationAxisZ double normalX double Direction to sun normalY double Direction to sun normalZ double Direction to sun indexSurface uint index of solar panel surfaces massChange sequence time time mass double massInstrument sequence inputfileInstrument filename', 'display_text': 'This program creates a satellite macro model for the estimation of non-gravitational accelerations acting on a satellite. Mandatory input values are the satelliteName, mass, coefficientDrag and information about the satellite surfaces. For low Earth orbiting satellites, like GRACE for instance, a good guess for the drag coefficient could be 2.3. Apart from that, it is latter on possible to estimate a more precise variable drag coefficient (e.g. miscAccelerations:atmosphericDrag), which will override this initial guess. 
Concerning the satellite surfaces an external file must be imported which must contain information about each single satellite plate in terms of plate area, the associated plate normal and re-radiation (reflexion, diffusion, and absorption) properties in the visible and IR part. For example, a description of the macro model for GRACE can be found under: https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/ProdSpecDoc_v4.6.pdf Additionally, it is possible to add further information like antennaThrust, solar panel, temporal mass changes and massInstrument using the modules option.'},
'SynthesisSphericalHarmonicsMatrix': { 'name': 'SynthesisSphericalHarmonicsMatrix', 'key': 'SynthesisSphericalHarmonicsMatrix', 'description': 'This program builds a linear operator matrix for spherical harmonic analysis or synthesis based on the points defined in This program builds a linear operator matrix for spherical harmonic analysis or synthesis based on the points defined in . Depending on the chosen type (synthesis, quadrature, or leastSquares), the resulting matrix can be used to: synthesis : Map spherical harmonic coefficients to values on a grid, quadrature : Integrate grid-based functionals into spherical harmonic coefficients by a simple quadrature formula, leastSquares : Estimate coefficients from grid data via a least squares approach. he spherical harmonic degree range is constrained by minDegree and maxDegree , and the ordering of the coefficients is given by This program builds a linear operator matrix for spherical harmonic analysis or synthesis based on the points defined in . The reference gravitational constant is GM , and the reference radius is R . The computed matrix is written to This program builds a linear operator matrix for spherical harmonic analysis or synthesis based on the points defined in with dimensions (number of grid points) (number of spherical harmonic coefficients). For type = leastSquares , the program applies a QR-based pseudo-inverse so that the output matrix can directly form the normal-equation building blocks for a blockwise least-squares solution in spherical harmonic space. 
See also Gravityfield2GriddedData , GriddedData2PotentialCoefficients , Gravityfield2SphericalHarmonicsVector , and MatrixCalculate for additional tools to convert between grids and spherical harmonics.', 'config_table': 'outputfileMatrix filename grid gridType kernel kernelType minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme of sh coefficients type choice synthesis synthesize spherical harmonics on a grid quadrature calculate spherical harmonics from grid leastSquares estimated spherical harmonics from grid', 'display_text': 'This program builds a linear operator matrix for spherical harmonic analysis or synthesis based on the points defined in grid. Depending on the chosen type (synthesis, quadrature, or leastSquares), the resulting matrix can be used to:
synthesis: Map spherical harmonic coefficients to values on a grid,
quadrature: Integrate grid-based functionals into spherical harmonic coefficients by a simple quadrature formula,
leastSquares: Estimate coefficients from grid data via a least squares approach.
The spherical harmonic degree range is constrained by minDegree and maxDegree, and the ordering of the coefficients is given by numbering. The reference gravitational constant is GM, and the reference radius is R.
The computed matrix is written to outputfileMatrix with dimensions (number of grid points) $\\times$ (number of spherical harmonic coefficients). For type = leastSquares, the program applies a QR-based pseudo-inverse so that the output matrix can directly form the normal-equation building blocks for a blockwise least-squares solution in spherical harmonic space.
See also Gravityfield2GriddedData, GriddedData2PotentialCoefficients, Gravityfield2SphericalHarmonicsVector, and MatrixCalculate for additional tools to convert between grids and spherical harmonics.'},
'TemporalRepresentation2TimeSeries': { 'name': 'TemporalRepresentation2TimeSeries', 'key': 'TemporalRepresentation2TimeSeries', 'description': 'This program computes the design matrix of temporal representation at a given time series. The output matrix contains the time steps in MJD in the first column, the other columns contain the design matrix. The intention of this program is to visualize the parametrization together with PlotGraph .', 'config_table': 'outputfileMatrix filename Time (MJD) in first column, design matrix follows timeSeries timeSeriesType temporal parametrizationTemporalType', 'display_text': 'This program computes the design matrix of temporal representation at a given time series. The output matrix contains the time steps in MJD in the first column, the other columns contain the design matrix. The intention of this program is to visualize the parametrization together with PlotGraph.'},
'ThermosphericState2GriddedData': { 'name': 'ThermosphericState2GriddedData', 'key': 'ThermosphericState2GriddedData', 'description': 'This program converts the output (neutral mass density,temperature) of an empirical thermosphere model (e.g. JB2008) on a given This program converts the output (neutral mass density,temperature) of an empirical thermosphere model (e.g. JB2008) on a given . Additionally, also the thermospheric winds estimated by using the horizontal wind model HWM 2014 can be assessed. The time for the evaluation can be specified in time . The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening .', 'config_table': 'outputfileGriddedData filename density [kg/m**3], temperature [K], wind (x, y, z) [m/s**2] thermosphere thermosphereType grid gridType time time localReferenceFrame boolean wind in local north, east, up, otherwise global terrestrial R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates', 'display_text': 'This program converts the output (neutral mass density,temperature) of an empirical thermosphere model (e.g. JB2008) on a given grid. Additionally, also the thermospheric winds estimated by using the horizontal wind model HWM 2014 can be assessed. The time for the evaluation can be specified in time. The values will be saved together with points expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening.'},
'TimeSeries2PotentialCoefficients': { 'name': 'TimeSeries2PotentialCoefficients', 'key': 'TimeSeries2PotentialCoefficients', 'description': 'Interpret the data columns of Interpret the data columns of as potential coefficients. The sequence of coefficients is given by Interpret the data columns of starting from data column startDataFields . For each epoch a Interpret the data columns of is written where the variableLoopTime and variableLoopIndex are expanded for each point of the given time series to create the file name for this epoch, see text parser . See also Gravityfield2PotentialCoefficientsTimeSeries .', 'config_table': 'outputfilesPotentialCoefficients filename for each epoch variableLoopTime string variable with time of each epoch variableLoopIndex string variable with index of current epoch (starts with zero) variableLoopCount string variable with total number of epochs inputfileTimeSeries filename each epoch: multiple data for points (MISCVALUES) startDataFields uint first data column minDegree uint minimal degree maxDegree uint maximal degree GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme', 'display_text': 'Interpret the data columns of inputfileTimeSeries as potential coefficients. The sequence of coefficients is given by numbering starting from data column startDataFields.
For each epoch an outputfilesPotentialCoefficients is written where the variableLoopTime and variableLoopIndex are expanded for each point of the given time series to create the file name for this epoch, see text parser.
See also Gravityfield2PotentialCoefficientsTimeSeries.'},
'TimeSeriesCreate': { 'name': 'TimeSeriesCreate', 'key': 'TimeSeriesCreate', 'description': 'This program generates an instrument file , containing a time series.', 'config_table': 'outputfileTimeSeries filename instrument file timeSeries timeSeriesType time series to be created data expression expression of output columns, extra \'epoch\' variable', 'display_text': 'This program generates an instrument file, containing a time series.'},
'Variational2OrbitAndStarCamera': { 'name': 'Variational2OrbitAndStarCamera', 'key': 'Variational2OrbitAndStarCamera', 'description': 'Extracts the reference Extracts the reference , Extracts the reference , and Extracts the reference from Extracts the reference .', 'config_table': 'outputfileOrbit filename output orbit (instrument) file outputfileStarCamera filename output satellite attidude as star camera (instrument) file outputfileEarthRotation filename output Earth rotation as star camera (instrument) file inputfileVariational filename input variational file', 'display_text': 'Extracts the reference outputfileOrbit, outputfileStarCamera, and outputfileEarthRotation from inputfileVariational.'},
'NormalsAccumulate': { 'name': 'NormalsAccumulate', 'key': 'NormalsAccumulate', 'description': 'This program accumulates normal equations and writes the total combined system to This program accumulates normal equations and writes the total combined system to . The This program accumulates normal equations and writes the total combined system to s must have all the same size and the same block structure. This program is the simplified and fast version of the more general program NormalsBuild . For input normals with different parameters, see NormalsReorderAndAccumulate .', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename', 'display_text': 'This program accumulates normal equations and writes the total combined system to outputfileNormalequation. The inputfileNormalEquations must have all the same size and the same block structure.
This program is the simplified and fast version of the more general program NormalsBuild. For input normals with different parameters, see NormalsReorderAndAccumulate.'},
'NormalsBuild': { 'name': 'NormalsBuild', 'key': 'NormalsBuild', 'description': 'This program accumulates This program accumulates s and writes the total combined system to This program accumulates . For a detailed description of the used algorithm see This program accumulates . Large normal equation systems can be divided into blocks with normalsBlockSize . A simplifed and fast version of this program is NormalsAccumulate . For input normals with different parameters see NormalsReorderAndAccumulate . To solve the system of normal equations use NormalsSolverVCE .', 'config_table': 'outputfileNormalEquation filename normalEquation normalEquationType normalsBlockSize uint block size for distributing the normal equations, 0: one block', 'display_text': 'This program accumulates normalEquations and writes the total combined system to outputfileNormalequation. For a detailed description of the used algorithm see normalEquation. Large normal equation systems can be divided into blocks with normalsBlockSize.
A simplified and fast version of this program is NormalsAccumulate. For input normals with different parameters see NormalsReorderAndAccumulate. To solve the system of normal equations use NormalsSolverVCE.'},
'NormalsBuildShortTimeStaticLongTime': { 'name': 'NormalsBuildShortTimeStaticLongTime', 'key': 'NormalsBuildShortTimeStaticLongTime', 'description': 'This program sets up normal equations based on This program sets up normal equations based on . Additionally short time and long time variations can be parametrized based on the static parameters in This program sets up normal equations based on in an efficient way. The observation equations are divided into time intervals (e.g. daily) as defined in This program sets up normal equations based on . With estimateLongTimeVariations additional temporal variations can be co-estimated for a subset of the parameters selected by This program sets up normal equations based on . These parameters might be spherical harmonic coefficients with a limited maximum degree. The temporal variations are represented by base functions (e.g. trend and annual oscillation) given by This program sets up normal equations based on . The temporal base functions are evaluated at the mid time of each interval , multiplicated with the design matrix of the selected parameters, and the design matrix is extended accordingly. With estimateShortTimeVariations short time variations of the gravity field can be co-estimated. Their purpose is to mitigate temporal aliasing. The short time parameters selected by This program sets up normal equations based on (e.g. daily constant or linear splines every 6 hour) are constrained by an This program sets up normal equations based on . If only a static parameter set is selected the coressponding part of the design matrix is copied and modeled as a constant value per interval in This program sets up normal equations based on additionally so the corresponding temporal factor can be expressed as Before writing the normal equations to This program sets up normal equations based on short time gravity and satellite specific parameters can be eliminated with eliminateParameter . 
Example: For the computation of the mean gravity field ITSG-Grace2018s with additional trend and annual signal the normal equations are computed month by month and accumulated afterwards (see NormalsAccumulate ). The observations were divided into daily intervals with This program sets up normal equations based on . The static gravity field has been parametrized as spherical harmonics up to degree in This program sets up normal equations based on . The trend and annual signals defined by This program sets up normal equations based on were estimated for selected parameters up to degree . To mitigate temporal aliasing daily gravity fields up to degree were setup and constrained with an This program sets up normal equations based on up to order three. A detailed description of the approach is given in: Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties. J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1 .', 'config_table': 'outputfileNormalEquation filename outputfile for normal equations observation observationType estimateShortTimeVariations sequence co-estimate short time gravity field variations autoregressiveModelSequence autoregressiveModelSequenceType AR model sequence for constraining short time gravity variations parameterSelection parameterSelectorType parameters describing the short time gravity field estimateLongTimeVariations sequence co-estimate long time gravity field variations parametrizationTemporal parametrizationTemporalType parametrization of time variations (trend, annual, ...) parameterSelection parameterSelectorType parameters describing the long time gravity field inputfileArcList filename list to correspond points of time to arc numbers defaultBlockSize uint block size for distributing the normal equations, 0: one block eliminateParameter boolean eliminate short time and state parameter', 'display_text': 'This program sets up normal equations based on observation. 
Additionally short time and long time variations can be parametrized based on the static parameters in observation in an efficient way. The observation equations are divided into time intervals $i \\in \\{1, ..., N\\}$ (e.g. daily) as defined in inputfileArcList.
With estimateLongTimeVariations additional temporal variations can be co-estimated for a subset of the parameters selected by parameterSelection. These parameters might be spherical harmonic coefficients with a limited maximum degree. The temporal variations are represented by base functions $\\Phi_k(t_i)$ (e.g. trend and annual oscillation) given by parametrizationTemporal. The temporal base functions are evaluated at the mid time $t_i$ of each interval $i$, multiplicated with the design matrix $\\M A_i$ of the selected parameters, and the design matrix is extended accordingly.
With estimateShortTimeVariations short time variations of the gravity field can be co-estimated. Their purpose is to mitigate temporal aliasing. The short time parameters selected by parameterSelection (e.g. daily constant or linear splines every 6 hour) are constrained by an autoregressiveModelSequence. If only a static parameter set is selected the corresponding part of the design matrix is copied and modeled as a constant value per interval in inputfileArcList additionally so the corresponding temporal factor can be expressed as \\[ \\Phi_i(t) = \\begin{cases} 1 &\\text{if} \\hspace{5pt} t \\in [t_i, t_{i+1}) \\\\ 0 & \\text{otherwise} \\end{cases}. \\] Before writing the normal equations to outputfileNormalEquation short time gravity and satellite specific parameters can be eliminated with eliminateParameter.
Example: For the computation of the mean gravity field ITSG-Grace2018s with additional trend and annual signal the normal equations are computed month by month and accumulated afterwards (see NormalsAccumulate). The observations were divided into daily intervals with inputfileArcList. The static gravity field has been parametrized as spherical harmonics up to degree $n=200$ in observation:parametrizationGravity. The trend and annual signals defined by estimateLongTimeVariations:parametrizationTemporal were estimated for selected parameters up to degree $n=120$. To mitigate temporal aliasing daily gravity fields up to degree $n=40$ were setup and constrained with an autoregressiveModelSequence up to order three.
A detailed description of the approach is given in: Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties. J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1.'},
'NormalsCreate': { 'name': 'NormalsCreate', 'key': 'NormalsCreate', 'description': 'Create normal equations from calculated matrices ( Create ). The Create can be created with ParameterNamesCreate . The Create must be symmetric. The Create must have the same number of rows and can contain multiple columns for multiple solutions. The Vector is the quadratic sum of observations for each column of the right hand side. It is used to determine the aposteriori accuracy If the vector is not given, it is automatically determined by assuming . The number of observations is given by the expression observationCount . The variable observationCount can be used, if it is set by a normal equation file Create .', 'config_table': 'outputfileNormalEquation filename inputfileParameterNames filename normalMatrix matrixGeneratorType rightHandSide matrixGeneratorType lPl matrixGeneratorType vector with size of rhs columns inputfileNormalEquationObsCount filename sets the variable observationCount observationCount expression (variables: rows, columns (rhs), observationCount)', 'display_text': 'Create normal equations from calculated matrices (matrixGenerator).
The normalMatrix must be symmetric. The rightHandSide must have the same number of rows and can contain multiple columns for multiple solutions.
The Vector $\\M l^T\\M P\\M l$ is the quadratic sum of observations for each column of the right hand side. It is used to determine the aposteriori accuracy \\[ \\hat{\\sigma}^2 = \\frac{\\hat{\\M e}^T\\M P\\hat{\\M e}}{n-m} = \\frac{\\M l^T\\M P\\M l - \\M n^T\\hat{\\M x}}{n-m}. \\]If the vector is not given, it is automatically determined by assuming $\\hat{\\sigma}^2=1$.
The number of observations $n$ is given by the expression observationCount. The variable observationCount can be used, if it is set by a normal equation file inputfileNormalEquationObsCount.'},
'NormalsEliminate': { 'name': 'NormalsEliminate', 'key': 'NormalsEliminate', 'description': 'This program eliminates parameters from a system of This program eliminates parameters from a system of s. To just remove (cutting out) parameters use NormalsReorder . The This program eliminates parameters from a system of allows the selection of parameters that will remain, all others will be eliminated. The order of remaining parameters can be modified via the parameter selection. Block size of the output normal matrix can be adjusted with outBlockSize . If it is set to zero, the This program eliminates parameters from a system of is written to a single block file. For example the normal equations are divided into two groups of parameters and according to and shall be eliminated, the reduced system of normal equations is given by See also NormalsReorder .', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename remainingParameters parameterSelectorType parameter order/selection of output normal equations outBlockSize uint block size for distributing the normal equations, 0: one block', 'display_text': 'This program eliminates parameters from a system of inputfileNormalEquations. To just remove (cutting out) parameters use NormalsReorder.
The remainingParameters allows the selection of parameters that will remain, all others will be eliminated. The order of remaining parameters can be modified via the parameter selection. Block size of the output normal matrix can be adjusted with outBlockSize. If it is set to zero, the outputfileNormalEquation is written to a single block file.
For example the normal equations are divided into two groups of parameters $\\hat{\\M x}_1$ and $\\hat{\\M x}_2$ according to \\[ \\begin{pmatrix} \\M N_{11} & \\M N_{12} \\\\ \\M N_{21} & \\M N_{22} \\end{pmatrix} \\begin{pmatrix} \\hat{\\M x}_1 \\\\ \\hat{\\M x}_2 \\end{pmatrix} = \\begin{pmatrix} \\M n_1 \\\\ \\M n_2 \\end{pmatrix}. \\]and $\\hat{\\M x}_2$ shall be eliminated, the reduced system of normal equations is given by \\[ \\bar{\\M N}\\hat{\\M x} = \\bar{\\M n} \\qquad\\text{with}\\qquad \\bar{\\M N}=\\M N_{11}-\\M N_{12}\\M N_{22}^{-1}\\M N_{12}^T \\qquad\\text{and}\\qquad\\bar{\\M n} = \\M n_1 - \\M N_{12}\\M N_{22}^{-1}\\M n_2. \\] See also NormalsReorder.'},
'NormalsMultiplyAdd': { 'name': 'NormalsMultiplyAdd', 'key': 'NormalsMultiplyAdd', 'description': 'This program modifies This program modifies in a way that is estimated instead of . where is This program modifies and is factor . This can be used to re-add reduced reference fields before a combined estimation at normal equation level. Therefore the right hand side of the normal equations is modified by and the quadratic sum of observations by As the normal matrix itself is not modified, rewriting of the matrix can be disabled by setting writeNormalMatrix to false.', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename inputfileParameter filename x factor double alpha writeNormalMatrix boolean write full coefficient matrix, right hand sides and info files', 'display_text': 'This program modifies inputfileNormalEquation in a way that $\\bar{\\M x}$ is estimated instead of $\\M x$. \\[ \\bar{\\M x} := \\M x + \\alpha\\, \\M x_0, \\]where $\\M x_0$ is inputfileParameter and $\\alpha$ is factor. This can be used to re-add reduced reference fields before a combined estimation at normal equation level. Therefore the right hand side of the normal equations is modified by \\[ \\bar{\\M n} := \\M n + \\alpha\\,\\M N\\M x_0, \\]and the quadratic sum of observations by \\[ \\bar{\\M l^T\\M P\\M l} := \\M l^T\\M P\\M l + \\alpha^2\\,\\M x_0^T\\M N\\M x_0 + 2\\alpha\\,\\M x_0^T\\M n \\] As the normal matrix itself is not modified, rewriting of the matrix can be disabled by setting writeNormalMatrix to false.'},
'NormalsRegularizationBorders': { 'name': 'NormalsRegularizationBorders', 'key': 'NormalsRegularizationBorders', 'description': 'This program sets up two regularization matrices for two different regional areas. For a given set of points defined by This program sets up two regularization matrices for two different regional areas. For a given set of points defined by it is evaluated, whether each point (corresponding to an unknown parameter of a respective parameterization by space localizing basis functions) is inside or outside a certain area given by This program sets up two regularization matrices for two different regional areas. For a given set of points defined by . Each regularization matrix is a diagonal matrix, one of them features a one if the point is inside, and a zero if the point lies outside the area. The other matrix features a zero if the point is inside, and a one if the point lies outside the area This results in two regularization matrices with The two matrices are provided as vectors of the diagonal in the output files This program sets up two regularization matrices for two different regional areas. For a given set of points defined by and This program sets up two regularization matrices for two different regional areas. For a given set of points defined by . The regularization matrices are then used by This program sets up two regularization matrices for two different regional areas. For a given set of points defined by . 
As an example, the two different areas could be oceanic regions on the one hand and continental areas on the other hand.', 'config_table': 'outputfileInside filename outputfileOutside filename grid gridType nodal point distribution of parameters, e.g harmonics splines border borderType regularization areas, e.g land and ocean R double reference radius for ellipsoidal coordinates for border inverseFlattening double reference flattening for ellipsoidal coordinates for border, 0: spherical coordinates', 'display_text': 'This program sets up two regularization matrices for two different regional areas. For a given set of points defined by grid it is evaluated, whether each point (corresponding to an unknown parameter of a respective parameterization by space localizing basis functions) is inside or outside a certain area given by border. Each regularization matrix is a diagonal matrix, one of them features a one if the point is inside, and a zero if the point lies outside the area. The other matrix features a zero if the point is inside, and a one if the point lies outside the area This results in two regularization matrices with \\[ \\M R_1+\\M R_2=\\M I. \\]The two matrices are provided as vectors of the diagonal in the output files outputfileOutside and outputfileInside. The regularization matrices are then used by normalEquation:regularization. As an example, the two different areas could be oceanic regions on the one hand and continental areas on the other hand.'},
'NormalsRegularizationSphericalHarmonics': { 'name': 'NormalsRegularizationSphericalHarmonics', 'key': 'NormalsRegularizationSphericalHarmonics', 'description': 'Diagonal regularization matrix from gravity field accuracies, if not given from signal (cnm,snm), if not given from kaulas rule. The inverse accuracies are used as weights in the regularization matrix. The diagonal is saved as Vector. The corresponding pseudo observations can be computed with Gravityfield2SphericalHarmonicsVector .', 'config_table': 'outputfileDiagonalmatrix filename gravityfield gravityfieldType use sigmas, if not given use signal (cnm,snm), if not given use kaulas rule minRegularizationDegree uint maxRegularizationDegree uint minDegree uint maxDegree uint numbering sphericalHarmonicsNumberingType numbering scheme for regul matrix GM double Geocentric gravitational constant R double reference radius makeIsotropic boolean kaulaPower double sigma = kaulaFactor*degree**kaulaPower kaulaFactor double sigma = kaulaFactor*degree**kaulaPower', 'display_text': 'Diagonal regularization matrix from gravity field accuracies, if not given from signal (cnm,snm), if not given from kaulas rule. The inverse accuracies $1/\\sigma_n^2$ are used as weights in the regularization matrix. The diagonal is saved as Vector.
The corresponding pseudo observations can be computed with Gravityfield2SphericalHarmonicsVector.'},
'NormalsReorder': { 'name': 'NormalsReorder', 'key': 'NormalsReorder', 'description': 'Reorder Reorder by selecting parameters in a specific order. The Reorder also allows one to change dimension of the normal equations, either by cutting parameters or by inserting zero rows/columns for additional parameters. Without Reorder the order of parameters remains the same. Additionally the block sizes of the files can be adjusted. If outBlockSize is set to zero, the normal matrix is written to a single block file, which is needed by some programs. To eliminate parameters without changing the result of the other parameters use NormalsEliminate .', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename parameterSelection parameterSelectorType parameter order/selection of output normal equations outBlockSize uint block size for distributing the normal equations, 0: one block', 'display_text': 'Reorder inputfileNormalEquation by selecting parameters in a specific order. The parameterSelection also allows one to change dimension of the normal equations, either by cutting parameters or by inserting zero rows/columns for additional parameters. Without parameterSelection the order of parameters remains the same. Additionally the block sizes of the files can be adjusted. If outBlockSize is set to zero, the normal matrix is written to a single block file, which is needed by some programs.
To eliminate parameters without changing the result of the other parameters use NormalsEliminate.'},
'NormalsReorderAndAccumulate': { 'name': 'NormalsReorderAndAccumulate', 'key': 'NormalsReorderAndAccumulate', 'description': 'This program accumulates This program accumulates s with respect to the parameter names and writes the total combined system to This program accumulates . The combined normal equation is extended to include all parameter names uniquely from all input normals. The input normals are sorted so that parameters with the same name are accumulated. This requires that the names in each normal equation are unique. The output can be written as multiple small block files with outBlockSize , or as single block with outBlockSize =0, or blocked with respect to the first part of the parameter names (object), if outBlockSize left empty. See also NormalsBuild and NormalsAccumulate .', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename outBlockSize uint block size for distributing the normal equations, 0: one block, empty: blocking by objects', 'display_text': 'This program accumulates inputfileNormalEquations with respect to the parameter names and writes the total combined system to outputfileNormalequation.
The combined normal equation is extended to include all parameter names uniquely from all input normals. The input normals are sorted so that parameters with the same name are accumulated. This requires that the names in each normal equation are unique.
The output can be written as multiple small block files with outBlockSize, or as single block with outBlockSize=0, or blocked with respect to the first part of the parameter names (object), if outBlockSize left empty.
See also NormalsBuild and NormalsAccumulate.'},
'NormalsScale': { 'name': 'NormalsScale', 'key': 'NormalsScale', 'description': 'Scales rows and columns of a system of Scales rows and columns of a system of given by a diagonal matrix Scales rows and columns of a system of The estimated solution is now This is effectively the same as rescaling columns of the design matrix. This program is useful when combining normal equations from different sources, for example in case the units of certain parameters don\'t match.', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename inputfileFactorVector filename Vector containing the factors', 'display_text': 'Scales rows and columns of a system of inputfileNormalEquation given by a diagonal matrix inputfileFactorVector $\\M S$ \\[ \\bar{\\M N} := \\M S \\M N \\M S \\qquad\\text{and}\\qquad \\bar{\\M n} := \\M S \\M n. \\]The estimated solution is now \\[ \\bar{\\M x} := \\M S^{-1} \\M x. \\]This is effectively the same as rescaling columns of the design matrix. This program is useful when combining normal equations from different sources, for example in case the units of certain parameters don\'t match.'},
'NormalsSolverVCE': { 'name': 'NormalsSolverVCE', 'key': 'NormalsSolverVCE', 'description': 'This program accumulates This program accumulates and solves the total combined system. The relative weigthing between the individual normals is determined iteratively by means of variance component estimation (VCE). For a detailed description of the used algorithm see This program accumulates . Besides the estimated parameter vector ( This program accumulates ) the estimated accuracies ( This program accumulates ) and the full covariance matrix ( This program accumulates ) can be saved. Also the combined normal system can be written to This program accumulates . The This program accumulates is a matrix with rows for each estimated parameter and columns for each This program accumulates and indicates the contribution of the individual normals to the estimated parameters. Each row sum up to one. See also NormalsBuild .', 'config_table': 'outputfileSolution filename parameter vector outputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) outputfileCovariance filename full covariance matrix outputfileContribution filename contribution of normal system components to the solution vector outputfileVarianceFactors filename estimated variance factors as vector outputfileNormalEquation filename the combined normal equation system normalEquation normalEquationType inputfileApproxSolution filename to accelerate convergence rightHandSideNumberVCE uint the right hand side number for estimation of variance factors normalsBlockSize uint block size for distributing the normal equations, 0: one block maxIterationCount uint maximum number of iterations for variance component estimation', 'display_text': 'This program accumulates normalEquation and solves the total combined system. The relative weigthing between the individual normals is determined iteratively by means of variance component estimation (VCE). 
For a detailed description of the used algorithm see normalEquation.
The outputfileContribution is a matrix with rows for each estimated parameter and columns for each normalEquation and indicates the contribution of the individual normals to the estimated parameters. Each row sums up to one.
See also NormalsBuild.'},
'NormalsTemporalCombination': { 'name': 'NormalsTemporalCombination', 'key': 'NormalsTemporalCombination', 'description': 'This program reads a times series of This program reads a times series of with asscociated This program reads a times series of and setup a new combined normal equation system. For each parameter a This program reads a times series of is used. It can be used to estimate trend and annual spherical harmonic coefficients from monthly GRACE normal equations.', 'config_table': 'outputfileNormalEquation filename inputfileNormalEquation filename normal equations for each point in time timeSeries timeSeriesType times of each normal equations parametrizationTemporal parametrizationTemporalType', 'display_text': 'This program reads a times series of inputfileNormalequation with asscociated timeSeries and setup a new combined normal equation system. For each parameter a parametrizationTemporal is used.
It can be used to estimate trend and annual spherical harmonic coefficients from monthly GRACE normal equations.'},
'ParameterNamesCreate': { 'name': 'ParameterNamesCreate', 'key': 'ParameterNamesCreate', 'description': 'Generate a Generate a by Generate a . This file can be used in NormalsCreate or in the class Generate a .', 'config_table': 'outputfileParameterNames filename output parameter names file parameterName parameterNamesType', 'display_text': 'Generate a outputfileParameterNames by parameterName. This file can be used in NormalsCreate or in the class parameterSelector.'},
'ParameterSelection2IndexVector': { 'name': 'ParameterSelection2IndexVector', 'key': 'ParameterSelection2IndexVector', 'description': 'Generate index vector from parameter selection in matrix format . This vector can be used in MatrixCalculate with Generate index vector from parameter selection in to reorder arbitrary vectors and matrices similar to NormalsReorder . The Generate index vector from parameter selection in allows reordering and dimension changes, either by cutting parameters or by inserting additional parameters. Generate index vector from parameter selection in contains indices of parameters in Generate index vector from parameter selection in or -1 for newly added parameters. Generate index vector from parameter selection in contains the selected parameter names.', 'config_table': 'outputfileIndexVector filename indices of source parameters in target normal equations outputfileParameterNames filename output parameter names file inputfileParameterNames filename parameter names file of source normal equations parameterSelection parameterSelectorType parameter order/selection of target normal equations', 'display_text': 'Generate index vector from parameter selection in matrix format. This vector can be used in MatrixCalculate with matrix:reorder to reorder arbitrary vectors and matrices similar to NormalsReorder.
The parameterSelection allows reordering and dimension changes, either by cutting parameters or by inserting additional parameters. outputfileIndexVector contains indices of parameters in inputfileParameterNames or -1 for newly added parameters. outputfileParameterNames contains the selected parameter names.'},
'Orbit2ArgumentOfLatitude': { 'name': 'Orbit2ArgumentOfLatitude', 'key': 'Orbit2ArgumentOfLatitude', 'description': 'This program computes the argument of latitude of an orbit and writes it as instrument file (MISCVALUE(S)). The data of This program computes the argument of latitude of an are appended as values to each epoch.', 'config_table': 'outputfileArgOfLatitude filename instrument file (MISCVALUE(S): argLat, ...) inputfileOrbit filename inputfileInstrument filename data are appended', 'display_text': 'This program computes the argument of latitude of an orbit and writes it as instrument file (MISCVALUE(S)). The data of inputfileInstrument are appended as values to each epoch.
'},
'Orbit2BetaPrimeAngle': { 'name': 'Orbit2BetaPrimeAngle', 'key': 'Orbit2BetaPrimeAngle', 'description': 'This program computes the beta prime angle (between the orbital plane and earth-sun direction) and writes it as MISCVALUE(S) instrument file . The angle is calculated w.r.t the sun (per default), but can be changed. The data of This program computes the beta prime angle (between the orbital plane and earth-sun direction) and writes it as MISCVALUE(S) are appended as values to each epoch.', 'config_table': 'outputfileBetaAngle filename instrument file (MISCVALUE(S): beta\', ...) inputfileOrbit filename inputfileInstrument filename data are appended ephemerides ephemeridesType planet planetType', 'display_text': 'This program computes the beta prime angle (between the orbital plane and earth-sun direction) and writes it as MISCVALUE(S) instrument file. The angle is calculated w.r.t the sun (per default), but can be changed. The data of inputfileInstrument are appended as values to each epoch.'},
'Orbit2EarthFixedOrbit': { 'name': 'Orbit2EarthFixedOrbit', 'key': 'Orbit2EarthFixedOrbit', 'description': 'Normally the orbits in GROOPS are given in the celestial reference frame (CRF) with the origin in the center of mass (CoM). This program rotates the orbit with Normally the orbits in GROOPS are given in the celestial reference frame (CRF) with the origin in the center of mass (CoM). This program rotates the orbit with from CRF to the TRF. To additionally tranform into the center of solid Earth (CE) frame (or center of Figure (CF)), a correction can be applied by providing degree one coefficients of a Normally the orbits in GROOPS are given in the celestial reference frame (CRF) with the origin in the center of mass (CoM). This program rotates the orbit with (e.g. ocean tides). If celestial2terrestrial is set to no, the inverse transformation is applied. See also InstrumentRotate .', 'config_table': 'outputfileOrbit filename inputfileOrbit filename earthRotation earthRotationType transformation from CRF to TRF gravityfield gravityfieldType degree 1 fluid mantle for CM2CE correction celestial2terrestrial boolean yes: crf->trf, no: trf->crf', 'display_text': 'Normally the orbits in GROOPS are given in the celestial reference frame (CRF) with the origin in the center of mass (CoM). This program rotates the orbit with earthRotation from CRF to the TRF.
To additionally transform into the center of solid Earth (CE) frame (or center of Figure (CF)), a correction can be applied by providing degree one coefficients of a gravityfield (e.g. ocean tides).
If celestial2terrestrial is set to no, the inverse transformation is applied.
See also InstrumentRotate.'},
'Orbit2EclipseFactor': { 'name': 'Orbit2EclipseFactor', 'key': 'Orbit2EclipseFactor', 'description': 'This program generates an instrument file (MISCVALUE(S)) containing the eclipse factor for a given set of orbit. The data of This program generates an are appended as values to each epoch.', 'config_table': 'outputfileEclipseFactor filename instrument file (MISCVALUE(S): eclipse, ...) inputfileOrbit filename inputfileInstrument filename data are appended ephemerides ephemeridesType eclipse eclipseType', 'display_text': 'This program generates an instrument file (MISCVALUE(S)) containing the eclipse factor for a given set of orbit. The data of inputfileInstrument are appended as values to each epoch.'},
'Orbit2Groundtracks': { 'name': 'Orbit2Groundtracks', 'key': 'Orbit2Groundtracks', 'description': 'This program write satellites positions as gridded data ( outputfileTrackGriddedData ) in a terrestrial reference frame. The points are expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening . The orbit data are given in the celestial frame so This program write is needed to transform the data into the terrestrial frame. The data of This program write are appended as values to each point.', 'config_table': 'outputfileGriddedData filename positions as gridded data inputfileOrbit filename inputfileInstrument filename values at grid points earthRotation earthRotationType transformation from CRF to TRF R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'This program write satellites positions as gridded data (outputfileTrackGriddedData) in a terrestrial reference frame. The points are expressed as ellipsoidal coordinates (longitude, latitude, height) based on a reference ellipsoid with parameters R and inverseFlattening. The orbit data are given in the celestial frame so earthRotation is needed to transform the data into the terrestrial frame. The data of inputfileInstrument are appended as values to each point.'},
'Orbit2Kepler': { 'name': 'Orbit2Kepler', 'key': 'Orbit2Kepler', 'description': 'This program computes the osculating Keplerian elements from position and velocity of a given This program computes the osculating Keplerian elements from position and velocity of a given . The This program computes the osculating Keplerian elements from position and velocity of a given must contain positions and velocities (see OrbitAddVelocityAndAcceleration ). The outputfileKepler is an instrument file (MISCVALUES) with the Keplerian elements at each epoch in the following order Ascending Node [degree] Inclination [degree] Argument of perigee [degree] major axis [m] eccentricity mean anomaly [degree] transit time of perigee [mjd] The data of This program computes the osculating Keplerian elements from position and velocity of a given are appended as values to each epoch.', 'config_table': 'outputfileKepler filename instrument file (MISCVALUES: Omega, i, omega [degree], a [m], e, M [degree], tau [mjd], ...) inputfileOrbit filename position and velocity at each epoch define the kepler orbit inputfileInstrument filename data is appended GM double Geocentric gravitational constant', 'display_text': 'This program computes the osculating Keplerian elements from position and velocity of a given inputfileOrbit. The inputfileOrbit must contain positions and velocities (see OrbitAddVelocityAndAcceleration).
The outputfileKepler is an instrument file (MISCVALUES) with the Keplerian elements at each epoch in the following order
Ascending Node $\\Omega$ [degree]
Inclination $i$ [degree]
Argument of perigee $\\omega$ [degree]
major axis $a$ [m]
eccentricity $e$
mean anomaly $M$ [degree]
transit time of perigee $\\tau$ [mjd]
The data of inputfileInstrument are appended as values to each epoch.'},
'Orbit2MagneticField': { 'name': 'Orbit2MagneticField', 'key': 'Orbit2MagneticField', 'description': 'This program computes the magentic field vector( in CRF)) along an orbit and writes it as instrument file (MISCVALUES). The data of This program computes the magentic field vector( are appended as data columns to each epoch.', 'config_table': 'outputfileMagneticField filename instrument file (x,y,z in CRF [Tesla = kg/A/s^2]), ...) inputfileOrbit filename inputfileInstrument filename data are appended to output file magnetosphere magnetosphereType earthRotation earthRotationType', 'display_text': 'This program computes the magentic field vector($x, y, z$ $[Tesla = kg/A/s^2]$ in CRF)) along an orbit and writes it as instrument file (MISCVALUES). The data of inputfileInstrument are appended as data columns to each epoch.'},
'Orbit2ThermosphericState': { 'name': 'Orbit2ThermosphericState', 'key': 'Orbit2ThermosphericState', 'description': 'This program computes the thermosperic state (density, temperature, wind (x,y,z in CRF)) based on emprical models along an orbit and writes it as instrument file (MISCVALUES). The wind is given in an celestial reference frame (CRF). The data of This program computes the thermosperic state (density, temperature, wind (x,y,z in CRF)) based on emprical models along an are appended as values to each epoch.', 'config_table': 'outputfileThermosphericState filename instrument file (MISCVALUES: density, temperature, wind (x,y,z in CRF), ...) inputfileOrbit filename inputfileInstrument filename data are appended to output file thermosphere thermosphereType earthRotation earthRotationType', 'display_text': 'This program computes the thermosperic state (density, temperature, wind (x,y,z in CRF)) based on emprical models along an orbit and writes it as instrument file (MISCVALUES). The wind is given in an celestial reference frame (CRF). The data of inputfileInstrument are appended as values to each epoch.'},
'OrbitAddVelocityAndAcceleration': { 'name': 'OrbitAddVelocityAndAcceleration', 'key': 'OrbitAddVelocityAndAcceleration', 'description': 'This program computes velocities and accelerations from a given orbit by differentiating a moving polynomial. The values are saved in one output file which then contains orbit, velocity and acceleration.', 'config_table': 'outputfileOrbit filename inputfileOrbit filename polynomialDegree uint Polynomial degree, must be even!', 'display_text': 'This program computes velocities and accelerations from a given orbit by differentiating a moving polynomial. The values are saved in one output file which then contains orbit, velocity and acceleration.'},
'PlanetOrbit': { 'name': 'PlanetOrbit', 'key': 'PlanetOrbit', 'description': 'Creates an orbit file of sun, moon, or planets. The orbit is given in the celestial reference frame (CRF) or alternatively in the terrestrial reference frame (TRF) if Creates an is provided.', 'config_table': 'outputfileOrbit filename planet planetType timeSeries timeSeriesType ephemerides ephemeridesType earthRotation earthRotationType transform orbits into TRF', 'display_text': 'Creates an orbit file of sun, moon, or planets. The orbit is given in the celestial reference frame (CRF) or alternatively in the terrestrial reference frame (TRF) if earthRotation is provided.'},
'PlotDegreeAmplitudes': { 'name': 'PlotDegreeAmplitudes', 'key': 'PlotDegreeAmplitudes', 'description': 'Plot degree amplitudes of potential coefficients computed by Gravityfield2DegreeAmplitudes or PotentialCoefficients2DegreeAmplitudes using the GMT Generic Mapping Tools ( https://www.generic-mapping-tools.org ). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile . This is a convenience program with meaningful default values. The same plots can be generated with the more general PlotGraph .', 'config_table': 'outputfile filename *.png, *.jpg, *.eps, ... title string layer plotGraphLayerType minDegree uint maxDegree uint majorTickSpacingDegree double boundary annotation minorTickSpacingDegree double frame tick spacing gridLineSpacingDegree double gridline spacing labelDegree string description of the x-axis logarithmicDegree boolean use logarithmic scale for the x-axis minY double maxY double majorTickSpacingY double boundary annotation minorTickSpacingY double frame tick spacing gridLineSpacingY double gridline spacing unitY string appended to axis values labelY string description of the y-axis logarithmicY boolean use logarithmic scale for the y-axis gridLine plotLineType The style of the grid lines. legend plotLegendType options sequence further options... width double in cm height double in cm titleFontSize uint in pt marginTitle double between title and figure [cm] drawGridOnTop boolean grid lines above all other lines/points options string transparent boolean make background transparent dpi uint use this resolution when rasterizing postscript file removeFiles boolean remove .gmt and script files', 'display_text': 'Plot degree amplitudes of potential coefficients computed by Gravityfield2DegreeAmplitudes or PotentialCoefficients2DegreeAmplitudes using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org). A variety of image file formats are supported (e.g. 
png, jpg, eps) determined by the extension of outputfile. This is a convenience program with meaningful default values. The same plots can be generated with the more general PlotGraph.
'},
'PlotGraph': { 'name': 'PlotGraph', 'key': 'PlotGraph', 'description': 'Generates a two dimensional xy plot using the GMT Generic Mapping Tools ( https://www.generic-mapping-tools.org ). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile . The plotting area is defined by the two axes Generates a two dimensional xy plot using the GMT Generic Mapping Tools ( . An alternative Generates a two dimensional xy plot using the GMT Generic Mapping Tools ( on the right hand side can be added. The content of the graph itself is defined by one or more Generates a two dimensional xy plot using the GMT Generic Mapping Tools ( s. The plot programs create a temporary directory in the path of outputfile , writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles =false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options =" FORMAT=value ", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html . See also: PlotDegreeAmplitudes , PlotMap , PlotMatrix , PlotSphericalHarmonicsTriangle .', 'config_table': 'outputfile filename *.png, *.jpg, *.eps, ... title string layer plotGraphLayerType axisX plotAxisType axisY plotAxisType axisY2 plotAxisType Second y-axis on right hand side colorbar plotColorbarType legend plotLegendType options sequence further options... 
width double in cm height double in cm titleFontSize uint in pt marginTitle double between title and figure [cm] drawGridOnTop boolean grid lines above all other lines/points options string transparent boolean make background transparent dpi uint use this resolution when rasterizing postscript file removeFiles boolean remove .gmt and script files', 'display_text': 'Generates a two dimensional xy plot using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
The plotting area is defined by the two axes axisX/Y. An alternative axisY2 on the right hand side can be added. The content of the graph itself is defined by one or more layers.
The plot programs create a temporary directory in the path of outputfile, writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
See also: PlotDegreeAmplitudes, PlotMap, PlotMatrix, PlotSphericalHarmonicsTriangle.'},
'PlotMap': { 'name': 'PlotMap', 'key': 'PlotMap', 'description': 'Generates a map using the GMT Generic Mapping Tools ( https://www.generic-mapping-tools.org ). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile . The base map is defined by a Generates a map using the GMT Generic Mapping Tools ( of an ellipsoid ( R , inverseFlattening ). The content of the map itself is defined by one or more Generates a map using the GMT Generic Mapping Tools ( s. The plot programs create a temporary directory in the path of outputfile , writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles =false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options =" FORMAT=value ", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html . See also: PlotDegreeAmplitudes , PlotGraph , PlotMatrix , PlotSphericalHarmonicsTriangle .', 'config_table': 'outputfile filename *.png, *.jpg, *.eps, ... title string statisticInfos boolean layer plotMapLayerType R double reference radius for ellipsoidal coordinates on output inverseFlattening double reference flattening for ellipsoidal coordinates on output, 0: spherical coordinates minLambda angle min. longitude (default: compute from input data) maxLambda angle max. longitude (default: compute from input data) minPhi angle min. latitude (default: compute from input data) maxPhi angle max. latitude (default: compute from input data) majorTickSpacing angle boundary annotation minorTickSpacing angle frame tick spacing gridLineSpacing angle gridline spacing colorbar plotColorbarType projection plotMapProjectionType map projection options sequence further options... 
width double in cm height double in cm titleFontSize uint in pt marginTitle double between title and figure [cm] drawGridOnTop boolean grid lines above all other lines/points options string transparent boolean make background transparent dpi uint use this resolution when rasterizing postscript file removeFiles boolean remove .gmt and script files', 'display_text': 'Generates a map using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
The base map is defined by a projection of an ellipsoid (R, inverseFlattening). The content of the map itself is defined by one or more layers.
The plot programs create a temporary directory in the path of outputfile, writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
'},
'PlotMatrix': { 'name': 'PlotMatrix', 'key': 'PlotMatrix', 'description': 'Plot the coefficients of a Plot the coefficients of a using the GMT Generic Mapping Tools ( https://www.generic-mapping-tools.org ). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile . The plot programs create a temporary directory in the path of outputfile , writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles =false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options =" FORMAT=value ", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html .', 'config_table': 'outputfile filename *.png, *.jpg, *.eps, ... title string inputfileMatrix filename minColumn uint minimum column index to plot maxColumn uint maximum column index to plot majorTickSpacingX double boundary annotation minorTickSpacingX double frame tick spacing gridLineSpacingX double gridline spacing minRow uint minimum row index to plot maxRow uint maximum row index to plot majorTickSpacingY double boundary annotation minorTickSpacingY double frame tick spacing gridLineSpacingY double gridline spacing gridLine plotLineType The style of the grid lines. colorbar plotColorbarType options sequence further options... width double in cm height double in cm titleFontSize uint in pt marginTitle double between title and figure [cm] drawGridOnTop boolean grid lines above all other lines/points options string transparent boolean make background transparent dpi uint use this resolution when rasterizing postscript file removeFiles boolean remove .gmt and script files', 'display_text': 'Plot the coefficients of a inputfileMatrix using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org). A variety of image file formats are supported (e.g. 
png, jpg, eps) determined by the extension of outputfile.
The plot programs create a temporary directory in the path of outputfile, writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
'},
'PlotSphericalHarmonicsTriangle': { 'name': 'PlotSphericalHarmonicsTriangle', 'key': 'PlotSphericalHarmonicsTriangle', 'description': 'Plot the potential coefficients of a spherical harmonic expansion using the GMT Generic Mapping Tools ( https://www.generic-mapping-tools.org ). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile . This program plots the formal errors (sigmas). If gravityfield provides no sigmas e.g. with setSigmasToZero in gravityfield:potentialCoefficients the coefficients itself are plotted instead. The plot programs create a temporary directory in the path of outputfile , writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles =false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options =" FORMAT=value ", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html .', 'config_table': 'outputfile filename *.png, *.jpg, *.eps, ... title string gravityfield gravityfieldType use sigmas, if not given use signal (cnm,snm) time time at this time the gravity field will be evaluated minDegree uint maxDegree uint majorTickSpacing double boundary annotation minorTickSpacing double frame tick spacing gridLineSpacing double gridline spacing gridLine plotLineType The style of the grid lines. colorbar plotColorbarType options sequence further options... 
width double in cm height double in cm titleFontSize uint in pt marginTitle double between title and figure [cm] drawGridOnTop boolean grid lines above all other lines/points options string transparent boolean make background transparent dpi uint use this resolution when rasterizing postscript file removeFiles boolean remove .gmt and script files', 'display_text': 'Plot the potential coefficients of a spherical harmonic expansion using the GMT Generic Mapping Tools (https://www.generic-mapping-tools.org). A variety of image file formats are supported (e.g. png, jpg, eps) determined by the extension of outputfile.
This program plots the formal errors (sigmas). If gravityfield provides no sigmas e.g. with setSigmasToZero in gravityfield:potentialCoefficients the coefficients itself are plotted instead.
The plot programs create a temporary directory in the path of outputfile, writes all needed data into it, generates a batch/shell script with the GMT commands, execute it, and remove the temporary directory. With setting options:removeFiles=false the last step is skipped and it is possible to adjust the plot manually to specific publication needs. Individual GMT settings are adjusted with options:options="FORMAT=value", see https://docs.generic-mapping-tools.org/latest/gmt.conf.html.
'},
'PreprocessingDualSst': { 'name': 'PreprocessingDualSst', 'key': 'PreprocessingDualSst', 'description': 'This programs processes satellite-to-satellite-tracking (SST) and orbit observations in a GRACE like configuration. Four different observation groups are considered separately: two types of SST and POD1/POD2 for the two satellites. This program works similar to PreprocessingSst , see there for details. Here only the settings explained, which are different. Both SST observation types are reduced by the same background models and the same impact of accelerometer measurements. The covariance matrix of the reduced observations should not consider the instrument noise only ( covarianceSst1/2 ) but must take the cross correlations covarianceAcc into account. 
The covariance matrix of the reduced observations is given by', 'config_table': 'outputfileSolution filename estimated parameter vector (static part only) outputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) outputfileParameterName filename estimated signal parameters (index is appended) estimateArcSigmas sequence outputfileSigmasPerArcSst1 filename accuracies of each arc (SST1) outputfileSigmasPerArcSst2 filename accuracies of each arc (SST2) outputfileSigmasPerArcAcc filename accuracies of each arc (ACC) outputfileSigmasPerArcPod1 filename accuracies of each arc (POD1) outputfileSigmasPerArcPod2 filename accuracies of each arc (POD2) estimateEpochSigmas sequence outputfileSigmasPerEpochSst1 filename accuracies of each epoch (SST1) outputfileSigmasPerEpochSst2 filename accuracies of each epoch (SST2) outputfileSigmasPerEpochAcc filename accuracies of each epoch (ACC) outputfileSigmasPerEpochPod1 filename accuracies of each epoch (POD1) outputfileSigmasPerEpochPod2 filename accuracies of each epoch (POD2) estimateCovarianceFunctions sequence outputfileCovarianceFunctionSst1 filename covariance function outputfileCovarianceFunctionSst2 filename covariance function outputfileCovarianceFunctionAcc filename covariance function outputfileCovarianceFunctionPod1 filename covariance functions for along, cross, radial direction outputfileCovarianceFunctionPod2 filename covariance functions for along, cross, radial direction computeResiduals sequence outputfileSst1Residuals filename outputfileSst2Residuals filename outputfileAccResiduals filename outputfilePod1Residuals filename outputfilePod2Residuals filename observation choice obervation equations (Sst) dualSstVariational sequence two SST observations rightHandSide sequence input for observation vectors inputfileSatelliteTracking1 filename ranging observations and corrections inputfileSatelliteTracking2 filename ranging observations and corrections 
inputfileOrbit1 filename kinematic positions of satellite A as observations inputfileOrbit2 filename kinematic positions of satellite B as observations sstType choice range rangeRate none inputfileVariational1 filename approximate position and integrated state matrix inputfileVariational2 filename approximate position and integrated state matrix ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst1 parametrizationSatelliteTrackingType satellite tracking parameter for first ranging observations parametrizationSst2 parametrizationSatelliteTrackingType satellite tracking parameter for second ranging observations integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n covarianceSst1 sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovarianceMatrixArc filename Must be given per sst arc with correct dimensions. inputfileSigmasCovarianceMatrixArc filename Vector with one sigma for each sampling double [seconds] sampling of the covariance function covarianceSst2 sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovarianceMatrixArc filename Must be given per sst arc with correct dimensions. 
inputfileSigmasCovarianceMatrixArc filename Vector with one sigma for each sampling double [seconds] sampling of the covariance function covarianceAcc sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovarianceMatrixArc filename Must be given per sst arc with correct dimensions. inputfileSigmasCovarianceMatrixArc filename Vector with one sigma for each sampling double [seconds] sampling of the covariance function covariancePod1 sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovariancePodEpoch filename 3x3 epoch covariances sampling double [seconds] sampling of the covariance function covariancePod2 sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovariancePodEpoch filename 3x3 epoch covariances sampling double [seconds] sampling of the covariance function estimateShortTimeVariations sequence co-estimate short time gravity field variations estimateSigma boolean estimate standard deviation via VCE autoregressiveModelSequence autoregressiveModelSequenceType AR model sequence for constraining short time gravity variations parameterSelection parameterSelectorType parameters describing the short time gravity field downweightPod double downweight factor for POD inputfileArcList filename list 
to correspond points of time to arc numbers iterationCount uint (maximum) number of iterations for the estimation of calibration parameter and error PSD variableNameIterations string All output fileNames in preprocessing iteration are expanded with this variable prior to writing to disk defaultBlockSize uint block size of static normal equation blocks', 'display_text': 'This programs processes satellite-to-satellite-tracking (SST) and orbit observations in a GRACE like configuration. Four different observation groups are considered separately: two types of SST and POD1/POD2 for the two satellites. This program works similar to PreprocessingSst, see there for details. Here only the settings explained, which are different.
Both SST observation types are reduced by the same background models and the same impact of accelerometer measurements. The covariance matrix of the reduced observations should not consider the the instrument noise only (covarianceSst1/2) but must take the cross correlations covarianceAcc into account. The covariance matrix of the reduced observations is given by \\[ \\M\\Sigma(\\begin{bmatrix} \\Delta l_{SST1} \\\\ \\Delta l_{SST2} \\end{bmatrix}) = \\begin{bmatrix} \\M\\Sigma_{SST1} + \\M\\Sigma_{ACC} & \\M\\Sigma_{ACC} \\\\ \\M\\Sigma_{ACC} & \\M\\Sigma_{SST2} + \\M\\Sigma_{ACC} \\end{bmatrix}. \\]'},
'PreprocessingGradiometer': { 'name': 'PreprocessingGradiometer', 'key': 'PreprocessingGradiometer', 'description': 'This program estimates empirical covariance functions of the gradiometer instrument noise and determine arc wise variances to downweight arcs with outliers. This program works similar to PreprocessingPod , see there for details. Here only the settings explained, which are different. ...', 'config_table': 'outputfileCovarianceFunction filename outputfileSigmasPerArc filename accuracies of each arc outputfileSggResiduals filename rightHandSide sggRightSideType input for the observation vector inputfileOrbit filename inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType parametrizationBias parametrizationTemporalType per arc covarianceSgg sequence inputfileCovarianceFunction filename approximate covariances in time covarianceLength uint counts observation epochs sampling double [seconds] sampling of the covariance function iterationCount uint for the estimation of calibration parameter and error PSD', 'display_text': 'This program estimates empirical covariance functions of the gradiometer instrument noise and determine arc wise variances to downweight arcs with outliers. This program works similar to PreprocessingPod, see there for details. Here only the settings explained, which are different.
...'},
'PreprocessingPod': { 'name': 'PreprocessingPod', 'key': 'PreprocessingPod', 'description': 'This program estimates empirical covariance functions of the instrument noise and determines arc-wise variances to downweight arcs with outliers. A complete least squares adjustment for gravity field determination is performed by computing the observation equations, see observation:podIntegral or observation:podVariational for details. The normal equations are accumulated and solved to outputfileSolution together with the estimated accuracies outputfileSigmax . The estimated residuals can be computed with computeResiduals . For each component (along, cross, radial) of the kinematic orbit positions a noise covariance function is estimated. The covariance matrix is composed of the sum of cosine transformation matrices and unknown variance factors. An additional variance factor can be computed ( estimateArcSigmas ) for each arc as the quotient of the weighted squared residuals and the redundancy. This variance factor should be around one for normally behaving arcs as the noise characteristics are already considered by the covariance matrix but bad arcs get a much larger variance. 
By applying this factor bad arcs or arcs with large outliers are downweighted.', 'config_table': 'outputfileSolution filename estimated parameter vector (static part only) outputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) outputfileParameterName filename names of estimated parameters (static part only) estimateArcSigmas sequence outputfileSigmasPerArcPod filename accuracies of each arc (POD2) estimateCovarianceFunctions sequence outputfileCovarianceFunctionPod filename covariance functions for along, cross, radial direction computeResiduals sequence outputfilePodResiduals filename observation choice obervation equations (POD) podIntegral sequence precise orbit data (integral approach) inputfileSatelliteModel filename satellite macro model rightHandSide podRightSideType input for the reduced observation vector inputfileOrbit filename used to evaluate the observation equations, not used as observations inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType gradientfield gravityfieldType low order field to estimate the change of the gravity by position adjustement parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration parametrizationAccelerationType orbit force parameters keepSatelliteStates boolean set boundary values of each arc global integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n accelerateComputation boolean acceleration of computation by transforming the observations podVariational sequence precise orbit data (variational equations) rightHandSide sequence input for observation vectors inputfileOrbit filename kinematic positions as observations inputfileVariational filename approximate position and integrated state matrix ephemerides ephemeridesType parametrizationGravity 
parametrizationGravityType gravity field parametrization parametrizationAcceleration parametrizationAccelerationType orbit force parameters integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n accelerateComputation boolean acceleration of computation by transforming the observations covariancePod sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuracies for each arc (multiplied with sigma) inputfileCovarianceFunction filename approximate covariances in time inputfileCovariancePodEpoch filename 3x3 epoch covariances sampling double [seconds] sampling of the covariance function inputfileArcList filename list to correspond points of time to arc numbers adjustmentThreshold double Adjustment factor threshold: Iteration will be stopped once both SST and POD adjustment factors are under this threshold iterationCount uint (maximum) number of iterations for the estimation of calibration parameters and error PSD', 'display_text': 'This program estimates empirical covariance functions of the instrument noise and determines arc-wise variances to downweight arcs with outliers.
A complete least squares adjustment for gravity field determination is performed by computing the observation equations, see observation:podIntegral or observation:podVariational for details. The normal equations are accumulated and solved to outputfileSolution together with the estimated accuracies outputfileSigmax. The estimated residuals $\\hat{\\M e}=\\M l-\\M A\\hat{\\M x}$ can be computed with computeResiduals.
For each component (along, cross, radial) of the kinematic orbit positions a noise covariance function is estimated \\[ \\text{cov}(\\Delta t_i) = \\sum_{n=0}^{N-1} a_n^2 \\cos\\left(\\frac{\\pi}{T} n\\Delta t_i\\right). \\]The covariance matrix is composed of the sum of matrices $F_n$ and unknown variance factors \\[ \\M\\Sigma = a_1^2\\M F_1 + a_2^2 \\M F_2 + \\cdots + a_N^2\\M F_N, \\]with the cosine transformation matrices \\[ \\M F_n = \\left(\\cos\\left(\\frac{\\pi}{T} n(t_i-t_k)\\right)\\right)_{ik}. \\] An additional variance factor can be computed (estimateArcSigmas) for each arc $k$ according to \\[ \\hat{\\sigma}_k^2 = \\frac{\\hat{\\M e}_k^T\\M\\Sigma^{-1}\\hat{\\M e}_k}{r_k}, \\]where $r_k$ is the redundancy. This variance factor should be around one for normally behaving arcs as the noise characteristics are already considered by the covariance matrix but bad arcs get a much larger variance. By applying this factor bad arcs or arcs with large outliers are downweighted.'},
'PreprocessingSst': { 'name': 'PreprocessingSst', 'key': 'PreprocessingSst', 'description': 'This program processes satellite-to-satellite-tracking (SST) and kinematic orbit observations in a GRACE like configuration. Three different observation groups are considered separately: SST and POD1/POD2 for the two satellites. This program works similar to PreprocessingPod , see there for details. Here only deviations in the settings are explained. Precise orbit data (POD) often contains systematic errors in addition to stochastic noise. In this case the variance component estimation fails and assigns too much weight to the POD data. Therefore an additional downweightPod factor can be applied to the standard deviation of POD for the next least squares adjustment in the iteration. This factor should also applied as sigma in observation for computation of the final solution e.g. with NormalsSolverVCE . Short time variations of the gravity field can be co-estimated together with the static/monthly mean gravity field. The short time parameters must also be set in observation:parametrizationGravity and can then be selected by estimateShortTimeVariations:parameterSelection . 
If these parameters are not time variable, for example when a range of static parameters is selected, they are set up as constant for each time interval defined in inputfileArcList . The parameters are constrained by an estimateShortTimeVariations:autoregressiveModelSequence . The weight of the constrain equations in terms of the standard deviation can be estimated by means of Variance Component Estimation (VCE) if estimateShortTimeVariations:estimateSigma is set. The mathematical background of this co-estimation can be found in: Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties. J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1 .', 'config_table': 'outputfileSolution filename estimated parameter vector (static part only) outputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) outputfileParameterName filename estimated signal parameters (index is appended) estimateArcSigmas sequence outputfileSigmasPerArcSst filename accuracies of each arc (SST) outputfileSigmasPerArcPod1 filename accuracies of each arc (POD1) outputfileSigmasPerArcPod2 filename accuracies of each arc (POD2) estimateEpochSigmas sequence outputfileSigmasPerEpochSst filename accuracies of each epoch (SST) outputfileSigmasPerEpochPod1 filename accuracies of each epoch (POD1) outputfileSigmasPerEpochPod2 filename accuracies of each epoch (POD2) estimateCovarianceFunctions sequence outputfileCovarianceFunctionSst filename covariance function outputfileCovarianceFunctionPod1 filename covariance functions for along, cross, radial direction outputfileCovarianceFunctionPod2 filename covariance functions for along, cross, radial direction estimateSstArcCovarianceSigmas sequence 
outputfileSigmasCovarianceMatrixArc filename one variance factor per matrix computeResiduals sequence outputfileSstResiduals filename outputfilePod1Residuals filename outputfilePod2Residuals filename observation choice obervation equations (Sst) sstIntegral sequence integral approach inputfileSatelliteModel1 filename satellite macro model inputfileSatelliteModel2 filename satellite macro model rightHandSide sstRightSideType input for the reduced observation vector sstType choice range rangeRate rangeAcceleration none inputfileOrbit1 filename used to evaluate the observation equations, not used as observations inputfileOrbit2 filename used to evaluate the observation equations, not used as observations inputfileStarCamera1 filename inputfileStarCamera2 filename earthRotation earthRotationType ephemerides ephemeridesType gradientfield gravityfieldType low order field to estimate the change of the gravity by position adjustement parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst parametrizationSatelliteTrackingType satellite tracking parameter keepSatelliteStates boolean set boundary values of each arc global integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n sstVariational sequence variational equations rightHandSide sequence input for observation vectors inputfileSatelliteTracking filename ranging observations and corrections inputfileOrbit1 filename kinematic positions of satellite A as observations inputfileOrbit2 filename kinematic positions of satellite B as observations sstType choice range rangeRate none inputfileVariational1 filename approximate position and integrated state matrix inputfileVariational2 filename approximate 
position and integrated state matrix ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst parametrizationSatelliteTrackingType satellite tracking parameter integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n covarianceSst sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovarianceMatrixArc filename Must be given per sst arc with correct dimensions. inputfileSigmasCovarianceMatrixArc filename Vector with one sigma for each sampling double [seconds] sampling of the covariance function covariancePod1 sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovariancePodEpoch filename 3x3 epoch covariances sampling double [seconds] sampling of the covariance function covariancePod2 sequence sigma double apriori factor of covariance function inputfileSigmasPerArc filename apriori different accuaries for each arc (multiplicated with sigma) inputfileSigmasPerEpoch filename apriori different accuaries for each epoch inputfileCovarianceFunction filename approximate covariances in time inputfileCovariancePodEpoch filename 3x3 epoch covariances sampling double [seconds] sampling of the covariance function 
estimateShortTimeVariations sequence co-estimate short time gravity field variations estimateSigma boolean estimate standard deviation via VCE autoregressiveModelSequence autoregressiveModelSequenceType AR model sequence for constraining short time gravity variations parameterSelection parameterSelectorType parameters describing the short time gravity field downweightPod double downweight factor for POD inputfileArcList filename list to correspond points of time to arc numbers iterationCount uint (maximum) number of iterations for the estimation of calibration parameter and error PSD variableNameIterations string All output fileNames in preprocessing iteration are expanded with this variable prior to writing to disk defaultBlockSize uint block size of static normal equation blocks', 'display_text': 'This program processes satellite-to-satellite-tracking (SST) and kinematic orbit observations in a GRACE like configuration. Three different observation groups are considered separately: SST and POD1/POD2 for the two satellites. This program works similar to PreprocessingPod, see there for details. Here only deviations in the settings are explained.
Precise orbit data (POD) often contains systematic errors in addition to stochastic noise. In this case the variance component estimation fails and assigns too much weight to the POD data. Therefore an additional downweightPod factor can be applied to the standard deviation of POD for the next least squares adjustment in the iteration. This factor should also applied as sigma in observation for computation of the final solution e.g. with NormalsSolverVCE.
Short time variations of the gravity field can be co-estimated together with the static/monthly mean gravity field. The short time parameters must also be set in observation:parametrizationGravity and can then be selected by estimateShortTimeVariations:parameterSelection. If these parameters are not time variable, for example when a range of static parameters is selected, they are set up as constant for each time interval defined in inputfileArcList. The parameters are constrained by an estimateShortTimeVariations:autoregressiveModelSequence. The weight of the constrain equations in terms of the standard deviation can be estimated by means of Variance Component Estimation (VCE) if estimateShortTimeVariations:estimateSigma is set. The mathematical background of this co-estimation can be found in:
Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties. J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1.'},
'PreprocessingVariationalEquation': { 'name': 'PreprocessingVariationalEquation', 'key': 'PreprocessingVariationalEquation', 'description': 'This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by . This means for each arc new initial state parameters are set up. In a first step the This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by acting on the satellite are evaluated at the apriori positions given by This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by . Non-conservative forces like solar radiation pressure need the orientation of the satellite ( This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. 
The variational equations are computed arc-wise as defined by ) and additionally, a satellite macro model ( satelliteModel ) with the surface properties. Furthermore This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by observations are also considered. In a second step the accelerations are integrated twice to a dynamic orbit using a moving polynomial with the degree integrationDegree . The orbit is corrected to be self-consistent. This means the forces should be evaluated at the new integrated positions instead of the apriori ones. This correction is computed in a linear approximation using the gradient of the forces with respect to the positions ( gradientfield ). As this term is small generally only the largest force components have to be considered. A low degree spherical harmonic expansion of the static gravity field (about up to degree 5) is sufficient in almost all cases. In this step also the state transition matrix (the partial derivatives of the current state, position and velocity) with respect to the initial state is computed. The integrated orbit together with the state transitions are stored in This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by , the integrated orbit only in This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. 
gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by . To improve the numerical stability a reference ellipse can be reduced beforehand using Enke\'s method ( useEnke ). Mathematically the result is the same, but as the large central term is removed before and restored afterwards more digits are available for the computation. The integrated orbit should be fitted to observations afterwards by the programs PreprocessingVariationalEquationOrbitFit and/or PreprocessingVariationalEquationSstFit . They apply a least squares adjustment by estimating some satellite parameters (e.g. an accelerometer bias). If the fitted orbit is too far away from the original This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by the linearization may not be accurate enough. In this case PreprocessingVariationalEquation should be run again with the fitted orbit as This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. 
The variational equations are computed arc-wise as defined by and introducing the estimatedParameters as additional forces.', 'config_table': 'outputfileVariational filename approximate position and integrated state matrix outputfileOrbit filename integrated orbit inputfileSatelliteModel filename satellite macro model inputfileOrbit filename approximate position, used to evaluate the force inputfileStarCamera filename rotation from body frame to CRF inputfileAccelerometer filename non-gravitational forces in satellite reference frame forces forcesType estimatedParameters sequence satellite parameters e.g. from orbit fit parametrizationAcceleration parametrizationAccelerationType orbit force parameters inputfileParameter filename estimated orbit force parameters earthRotation earthRotationType ephemerides ephemeridesType gradientfield gravityfieldType low order field to estimate the change of the gravity by position adjustement integrationDegree uint integration of forces by polynomial approximation of degree n useEnke sequence integrate differential forces to an elliptical reference trajectory GM double geocentric gravitational constant used for elliptical reference orbit', 'display_text': 'This program integrates an orbit dynamically using the given forces and set up the state transition matrix for each time step. These are the prerequisites for a least squares adjustment (e.g. gravity field determination) using the variational equation approach. The variational equations are computed arc-wise as defined by inputfileOrbit. This means for each arc new initial state parameters are set up.
In a first step the forces acting on the satellite are evaluated at the apriori positions given by inputfileOrbit. Non-conservative forces like solar radiation pressure need the orientation of the satellite (inputfileStarCamera) and additionally, a satellite macro model (satelliteModel) with the surface properties. Furthermore inputfileAccelerometer observations are also considered.
In a second step the accelerations are integrated twice to a dynamic orbit using a moving polynomial with the degree integrationDegree. The orbit is corrected to be self-consistent. This means the forces should be evaluated at the new integrated positions instead of the apriori ones. This correction is computed in a linear approximation using the gradient of the forces with respect to the positions (gradientfield). As this term is small generally only the largest force components have to be considered. A low degree spherical harmonic expansion of the static gravity field (about up to degree 5) is sufficient in almost all cases. In this step also the state transition matrix (the partial derivatives of the current state, position and velocity) with respect to the initial state is computed. The integrated orbit together with the state transitions are stored in outputfileVariational, the integrated orbit only in outputfileOrbit.
To improve the numerical stability, a reference ellipse can be reduced beforehand using Enke\'s method (useEnke). Mathematically the result is the same, but as the large central term is removed before and restored afterwards, more digits are available for the computation.
The integrated orbit should be fitted to observations afterwards by the programs PreprocessingVariationalEquationOrbitFit and/or PreprocessingVariationalEquationSstFit. They apply a least squares adjustment by estimating some satellite parameters (e.g. an accelerometer bias). If the fitted orbit is too far away from the original inputfileOrbit the linearization may not be accurate enough. In this case PreprocessingVariationalEquation should be run again with the fitted orbit as inputfileOrbit and introducing the estimatedParameters as additional forces.'},
'PreprocessingVariationalEquationOrbitFit': { 'name': 'PreprocessingVariationalEquationOrbitFit', 'key': 'PreprocessingVariationalEquationOrbitFit', 'description': 'This program fits an This program fits an to an observed This program fits an by estimating parameters in a least squares adjustment. Additional to the initial satellite state for each arc, these parameters can be This program fits an , satellite This program fits an and stochastic pulses (velocity jumps) at given times, This program fits an . The estimated parameters can be stored with This program fits an and an extra file with the parameter names is created. The fitted orbit is written as new reference in This program fits an and additionally in This program fits an . The observed orbit positions ( This program fits an ) together with the epoch-wise covariance matrix ( This program fits an ) must be split in the same arcs as the variational equations but not necessarily uniformly distributed (use irregularData in InstrumentSynchronize ). An iterative downweighting of outliers is performed by M-Huber method. The observation equations (parameter sensitivity matrix) are computed by integration of the variational equations ( This program fits an ) using a polynomial with integrationDegree and interpolated to the observation epochs using a polynomial with interpolationDegree . All parameters used here must be reestimated in the full least squares adjustment for the gravity field determination to get a solution which is not biased towards the reference field. The solutions of additional estimations are relative (deltas) as the parameters are already used as Taylor point in the reference orbit. 
See also PreprocessingVariationalEquation .', 'config_table': 'outputfileVariational filename approximate position and integrated state matrix outputfileOrbit filename integrated orbit outputfileSolution filename estimated calibration and state parameters inputfileVariational filename approximate position and integrated state matrix inputfileOrbit filename kinematic positions of satellite as observations inputfileCovariancePodEpoch filename 3x3 epoch wise covariances ephemerides ephemeridesType may be needed by parametrizationAcceleration parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration parametrizationAccelerationType orbit force parameters stochasticPulse timeSeriesType integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n iterationCount uint for the estimation of calibration parameter and error PSD', 'display_text': 'This program fits an inputfileVariational to an observed inputfileOrbit by estimating parameters in a least squares adjustment. Additional to the initial satellite state for each arc, these parameters can be parametrizationGravity, satellite parametrizationAcceleration and stochastic pulses (velocity jumps) at given times, stochasticPulse. The estimated parameters can be stored with outputfileSolution and an extra file with the parameter names is created. The fitted orbit is written as new reference in outputfileVariational and additionally in outputfileOrbit.
The observed orbit positions (inputfileOrbit) together with the epoch-wise covariance matrix (inputfileCovariancePodEpoch) must be split in the same arcs as the variational equations but not necessarily uniformly distributed (use irregularData in InstrumentSynchronize). An iterative downweighting of outliers is performed by M-Huber method.
The observation equations (parameter sensitivity matrix) are computed by integration of the variational equations (inputfileVariational) using a polynomial with integrationDegree and interpolated to the observation epochs using a polynomial with interpolationDegree.
All parameters used here must be reestimated in the full least squares adjustment for the gravity field determination to get a solution which is not biased towards the reference field. The solutions of additional estimations are relative (deltas) as the parameters are already used as Taylor point in the reference orbit.
See also PreprocessingVariationalEquation.'},
'PreprocessingVariationalEquationSstFit': { 'name': 'PreprocessingVariationalEquationSstFit', 'key': 'PreprocessingVariationalEquationSstFit', 'description': 'This program fits two This program fits two to satellite-to-satellite-tracking (SST) and orbit observations in a GRACE like configuration. It works similar to PreprocessingVariationalEquationOrbitFit , see there for details. As the relative weighting of the observation types is important complex description of the covariances can be set with This program fits two , This program fits two , This program fits two .', 'config_table': 'outputfileVariational1 filename approximate position and integrated state matrix outputfileVariational2 filename approximate position and integrated state matrix outputfileOrbit1 filename integrated orbit outputfileOrbit2 filename integrated orbit outputfileSolution1 filename estimated calibration and state parameters outputfileSolution2 filename estimated calibration and state parameters rightHandSide sequence input for observation vectors inputfileSatelliteTracking filename ranging observations and corrections inputfileOrbit1 filename kinematic positions of satellite A as observations inputfileOrbit2 filename kinematic positions of satellite B as observations sstType choice range rangeRate none inputfileVariational1 filename approximate position and integrated state matrix inputfileVariational2 filename approximate position and integrated state matrix ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst parametrizationSatelliteTrackingType satellite tracking parameter integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n 
covarianceSst covarianceSstType covariance matrix of satellite to satellite tracking observations covariancePod1 covariancePodType covariance matrix of kinematic orbits (satellite 1) covariancePod2 covariancePodType covariance matrix of kinematic orbits (satellite 2) iterationCount uint for the estimation of calibration parameter and error PSD', 'display_text': 'This program fits two inputfileVariational1/2 to satellite-to-satellite-tracking (SST) and orbit observations in a GRACE like configuration. It works similar to PreprocessingVariationalEquationOrbitFit, see there for details.
As the relative weighting of the observation types is important, a complex description of the covariances can be set with covarianceSst, covariancePod1, covariancePod2.'},
'NoiseAccelerometer': { 'name': 'NoiseAccelerometer', 'key': 'NoiseAccelerometer', 'description': 'This program adds noise and biases to simulated accelerometer data generated by SimulateAccelerometer . See This program adds noise and biases to simulated for details on noise generation.', 'config_table': 'outputfileAccelerometer filename inputfileAccelerometer filename biasAlong double [m/s**2] biasCross double [m/s**2] biasRadial double [m/s**2] noiseAlong noiseGeneratorType [m/s**2] noiseCross noiseGeneratorType [m/s**2] noiseRadial noiseGeneratorType [m/s**2]', 'display_text': 'This program adds noise and biases to simulated accelerometer data generated by SimulateAccelerometer. See noiseGenerator for details on noise generation.'},
'NoiseGriddedData': { 'name': 'NoiseGriddedData', 'key': 'NoiseGriddedData', 'description': 'This program adds noise to gridded data data . See This program adds noise to for details on noise generation.', 'config_table': 'outputfileGriddedData filename inputfileGriddedData filename noise noiseGeneratorType startDataFields uint start countDataFields uint number of data fields (default: all after start)', 'display_text': 'This program adds noise to gridded data data. See noiseGenerator for details on noise generation.'},
'NoiseInstrument': { 'name': 'NoiseInstrument', 'key': 'NoiseInstrument', 'description': 'This program adds noise to instrument data . See This program adds noise to for details on noise generation.', 'config_table': 'outputfileInstrument filename inputfileInstrument filename noise noiseGeneratorType startDataFields uint start countDataFields uint number of data fields (default: all after start)', 'display_text': 'This program adds noise to instrument data. See noiseGenerator for details on noise generation.'},
'NoiseNormalsSolution': { 'name': 'NoiseNormalsSolution', 'key': 'NoiseNormalsSolution', 'description': 'The inverse of the normal matrix of The inverse of the normal matrix of represents the covariance matrix of the estimated parameters. This program generates a noise vector with if generated input noise is standard white noise. The noise vector is computed with where is the generated The inverse of the normal matrix of and is the cholesky upper triangle matrix of the normal matrix .', 'config_table': 'outputfileNoise filename generated noise as matrix: parameterCount x sampleCount inputfileNormalEquation filename noise noiseGeneratorType sampleCount uint number of samples to be generated useEigenvalueDecomposition boolean use eigenvalue decomposition', 'display_text': 'The inverse of the normal matrix of inputfileNormalEquation represents the covariance matrix of the estimated parameters. This program generates a noise vector with \\[ \\M\\Sigma(\\M e) = \\M N^{-1}, \\]if generated input noise is standard white noise.
The noise vector is computed with \\[ \\M e = \\M W^{-T} \\M z, \\]where $\\M z$ is the generated noise and $\\M W$ is the cholesky upper triangle matrix of the normal matrix $\\M N=\\M W^T\\M W$.'},
'NoiseOrbit': { 'name': 'NoiseOrbit', 'key': 'NoiseOrbit', 'description': 'This program adds noise to simulated satellite\'s positions and velocities generated by SimulateOrbit (along, cross, radial). See This program adds noise to simulated for details on noise options.', 'config_table': 'outputfileOrbit filename inputfileOrbit filename noisePosition noiseGeneratorType along, cross, radial [m] noiseVelocity noiseGeneratorType along, cross, radial [m/s]', 'display_text': 'This program adds noise to simulated satellite\'s positions and velocities generated by SimulateOrbit (along, cross, radial). See noiseGenerator for details on noise options.'},
'NoiseSatelliteTracking': { 'name': 'NoiseSatelliteTracking', 'key': 'NoiseSatelliteTracking', 'description': 'This program adds noise to simulated satellite tracking data generated by SimulateSatelliteTracking . See This program adds noise to simulated satellite tracking data generated by for details on noise generation.', 'config_table': 'outputfileSatelliteTracking filename inputfileSatelliteTracking filename noiseRange noiseGeneratorType [m] noiseRangeRate noiseGeneratorType [m/s] noiseRangeAcceleration noiseGeneratorType [m/s^2]', 'display_text': 'This program adds noise to simulated satellite tracking data generated by SimulateSatelliteTracking. See noiseGenerator for details on noise generation.'},
'NoiseStarCamera': { 'name': 'NoiseStarCamera', 'key': 'NoiseStarCamera', 'description': 'This program adds noise to rotation observations. The noise is computed via a pseudo random sequence. See This program adds noise to rotation observations. The noise is computed via a pseudo random sequence. See for details on noise options.', 'config_table': 'outputfileStarCamera filename inputfileStarCamera filename noiseRoll noiseGeneratorType [rad] noisePitch noiseGeneratorType [rad] noiseYaw noiseGeneratorType [rad]', 'display_text': 'This program adds noise to rotation observations. The noise is computed via a pseudo random sequence. See noiseGenerator for details on noise options.'},
'NoiseTimeSeries': { 'name': 'NoiseTimeSeries', 'key': 'NoiseTimeSeries', 'description': 'This program generates This program generates with the requested characteristics. See This program generates for details on noise options.', 'config_table': 'outputfileNoise filename outputfileCovarianceFunction filename noise noiseGeneratorType timeSeries timeSeriesType columns uint number of noise series (columns)', 'display_text': 'This program generates outputfileNoise with the requested characteristics. See noiseGenerator for details on noise options.'},
'SimulateAccelerometer': { 'name': 'SimulateAccelerometer', 'key': 'SimulateAccelerometer', 'description': 'This program simulates accelerometer data . The orientation of the accelerometer is given by This program simulates otherwise the celestial reference frame (CRF) is used. For computation of non-conservative forces a This program simulates is needed.', 'config_table': 'outputfileAccelerometer filename inputfileSatelliteModel filename satellite macro model inputfileOrbit filename inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType forces forcesType', 'display_text': 'This program simulates accelerometer data. The orientation of the accelerometer is given by inputfileStarCamera otherwise the celestial reference frame (CRF) is used. For computation of non-conservative forces a satelliteModel is needed.'},
'SimulateAccelerometerCoMOffset': { 'name': 'SimulateAccelerometerCoMOffset', 'key': 'SimulateAccelerometerCoMOffset', 'description': 'This program generates an accelerometer file containing perturbing accelerations due to a given center of mass (CoM) offset. This includes centrifugal effects, Euler forces and the effect of gravity gradients.', 'config_table': 'outputfileAccelerometer filename effect of offset inputfileOrbit filename inputfileStarCamera filename applyAngularRate boolean compute effect of centrifugal forces applyAngularAccelerations boolean compute effect of Euler forces gradientfield gravityfieldType low order field to estimate the change of the gravity by position adjustment earthRotation earthRotationType interpolationDegree uint derivation of quaternions by polynomial interpolation of degree n CoMOffsetX double offset [m] CoMOffsetY double offset [m] CoMOffsetZ double offset [m]', 'display_text': 'This program generates an accelerometer file containing perturbing accelerations due to a given center of mass (CoM) offset. This includes centrifugal effects, Euler forces and the effect of gravity gradients.'},
'SimulateGradiometer': { 'name': 'SimulateGradiometer', 'key': 'SimulateGradiometer', 'description': 'This program simulates error free gradiometer data along a satellite\'s orbit. The orientation of the full tensor gradiometer is given by This program simulates error free otherwise the celestial reference frame (CRF) is used. The gravity gradients are given by This program simulates error free and This program simulates error free .', 'config_table': 'outputfileGradiometer filename inputfileOrbit filename inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType gravityfield gravityfieldType tides tidesType', 'display_text': 'This program simulates error free gradiometer data along a satellite\'s orbit. The orientation of the full tensor gradiometer is given by inputfileStarCamera otherwise the celestial reference frame (CRF) is used. The gravity gradients are given by gravityfield and tides.'},
'SimulateKeplerOrbit': { 'name': 'SimulateKeplerOrbit', 'key': 'SimulateKeplerOrbit', 'description': 'This program simulates a Keplerian orbit at a given timeSeries starting from the given integrationConstants .', 'config_table': 'outputfileOrbit filename timeSeries timeSeriesType GM double Geocentric gravitational constant integrationConstants choice kepler sequence majorAxis double [m] eccentricity double [-] inclination angle [degree] ascendingNode angle [degree] argumentOfPerigee angle [degree] meanAnomaly angle [degree] time time integration constants are valid at this epoch positionAndVelocity sequence position0x double [m] in CRF position0y double [m] in CRF position0z double [m] in CRF velocity0x double [m/s] velocity0y double [m/s] velocity0z double [m/s] time time integration constants are valid at this epoch', 'display_text': 'This program simulates a Keplerian orbit at a given timeSeries starting from the given integrationConstants.'},
'SimulateOrbit': { 'name': 'SimulateOrbit', 'key': 'SimulateOrbit', 'description': 'This program integrates an orbit from a given force function (dynamic orbit). The force functions are given by This program integrates an . For computation of non-conservative forces a satelliteModel is needed. The integration method must be selected with This program integrates an . Because the orbit data are calculated in the celestial reference frame (CRF) you need This program integrates an to transform the force function from the terrestrial reference frame (TRF). The integration start and end time, as well as the sampling, are derived from the timeSeries option. It is possible to integrate the arc in reverse , where the initial conditions are assumed to be met at the end time of the timeSeries .', 'config_table': 'outputfileOrbit filename orbit file to be written. inputfileSatelliteModel filename satellite macro model timeSeries timeSeriesType time points for simulated orbit epochs. integrationConstants choice kepler sequence majorAxis double [m] eccentricity double [-] inclination angle [degree] ascendingNode angle [degree] argumentOfPerigee angle [degree] meanAnomaly angle [degree] GM double Geocentric gravitational constant positionAndVelocity sequence position0x double [m] in CRF position0y double [m] in CRF position0z double [m] in CRF velocity0x double [m/s] velocity0y double [m/s] velocity0z double [m/s] file sequence inputfileOrbit filename only epoch at timeStart is used margin double [seconds] used when finding initial epoch in orbitFile propagator orbitPropagatorType orbit propagation method. earthRotation earthRotationType ephemerides ephemeridesType forces forcesType considered in orbit propagation. reverse boolean start integration at last epoch in timeSeries, going backward in time.', 'display_text': 'This program integrates an orbit from a given force function (dynamic orbit). The force functions are given by forces. 
For computation of non-conservative forces a satelliteModel is needed. The integration method must be selected with propagator. Because the orbit data are calculated in the celestial reference frame (CRF) you need earthRotation to transform the force function from the terrestrial reference frame (TRF). The integration start and end time, as well as the sampling, are derived from the timeSeries option. It is possible to integrate the arc in reverse, where the initial conditions are assumed to be met at the end time of the timeSeries.'},
'SimulateSatelliteTracking': { 'name': 'SimulateSatelliteTracking', 'key': 'SimulateSatelliteTracking', 'description': 'This program simulates tracking data (range, range-rate, range-accelerations) between 2 satellites. The range is given by with and the unit vector in line of sight (LOS) direction Range-rates and range accelerations are obtained by differentiation with the derivative of the unit vector The This program simulates s must contain positions, velocities, and acceleration (see OrbitAddVelocityAndAcceleration ).', 'config_table': 'outputfileSatelliteTracking filename inputfileOrbit1 filename inputfileOrbit2 filename', 'display_text': 'This program simulates tracking data (range, range-rate, range-accelerations) between 2 satellites. The range is given by \\[ \\rho(t) = \\left\\lVert{\\M r_B(t) - \\M r_A(t)}\\right\\rVert = \\M e_{AB}(t)\\cdot\\M r_{AB}(t), \\]with $\\M r_{AB} = \\M r_B - \\M r_A$ and the unit vector in line of sight (LOS) direction \\[\\label{sst.los} \\M e_{AB} = \\frac{\\M r_{AB}}{\\left\\lVert{\\M r_{AB}}\\right\\rVert}=\\frac{\\M r_{AB}}{\\rho}. \\]Range-rates $\\dot{\\rho}$ and range accelerations $\\ddot{\\rho}$ are obtained by differentiation \\[\\label{obsRangeRate} \\dot{\\rho} = \\M e_{AB}\\cdot\\dot{\\M r}_{AB} + \\dot{\\M e}_{AB}\\cdot\\M r_{AB} = \\M e_{AB}\\cdot\\dot{\\M r}_{AB}, \\]\\[\\label{obsRangeAccl} \\begin{split} \\ddot{\\rho} &= \\M e_{AB}\\cdot\\ddot{\\M r}_{AB} +\\dot{\\M e}_{AB}\\cdot\\dot{\\M r}_{AB} = \\M e_{AB}\\cdot\\ddot{\\M r}_{AB} + \\frac{1}{\\rho}\\left(\\dot{\\M r}_{AB}^2-\\dot{\\rho}^2\\right). \\\\ \\end{split} \\]with the derivative of the unit vector \\[ \\dot{\\M e}_{AB}=\\frac{d}{dt}\\left(\\frac{\\M r_{AB}}{\\rho}\\right) =\\frac{\\dot{\\M r}_{AB}}{\\rho}-\\frac{\\dot{\\rho}\\cdot\\M r_{AB}}{\\rho^2} =\\frac{1}{\\rho}\\left({\\dot{\\M r}_{AB}-\\dot{\\rho}\\cdot\\M e_{AB}}\\right). \\]The inputfileOrbits must contain positions, velocities, and acceleration (see OrbitAddVelocityAndAcceleration).'},
'SimulateStarCamera': { 'name': 'SimulateStarCamera', 'key': 'SimulateStarCamera', 'description': 'This program simulates star camera measurements at each satellite\'s position. The satellite\'s orientation follows a local orbit frame with the x-axis in along track (along velocity), y-axis is cross track (normal to position and velocity vector) and z-axis pointing nadir (negative position vector). As for non circular orbit the position and velocity are not exact normal, the default is the x-axis to be exact along velocity and the z-axis forms a right hand system (not exact nadir) or with nadirPointing the z-axis is exact nadir and x-axis approximates along. The resulting rotation matrices rotate from satellite frame to inertial frame.', 'config_table': 'outputfileStarCamera filename rotation from satellite to inertial frame (x: along, y: cross, z: nadir) inputfileOrbit filename position and velocity defines the orientation of the satellite at each epoch nadirPointing boolean false: exact along and nearly nadir, true: nearly along and exact nadir', 'display_text': 'This program simulates star camera measurements at each satellite\'s position. The satellite\'s orientation follows a local orbit frame with the x-axis in along track (along velocity), y-axis is cross track (normal to position and velocity vector) and z-axis pointing nadir (negative position vector). As for non circular orbit the position and velocity are not exact normal, the default is the x-axis to be exact along velocity and the z-axis forms a right hand system (not exact nadir) or with nadirPointing the z-axis is exact nadir and x-axis approximates along. The resulting rotation matrices rotate from satellite frame to inertial frame.'},
'SimulateStarCameraGnss': { 'name': 'SimulateStarCameraGnss', 'key': 'SimulateStarCameraGnss', 'description': 'This program simulates star camera measurements at each satellite position of This program simulates . The resulting rotation matrices rotate from body frame to inertial frame. The body frame refers to the IGS-specific (not the manufacturer-specific) body frame, as described by . The This program simulates must contain velocities (use OrbitAddVelocityAndAcceleration if needed). Information about the attitude mode(s) used by the GNSS satellite may be provided via This program simulates . This file can be created with GnssAttitudeInfoCreate . It contains one or more time-dependent entries, each defining the default attitude mode, the attitude modes used around orbit noon and midnight, and some parameters required by the various modes. If no This program simulates is selected, the program defaults to a nominal yaw-steering attitude model. A sufficiently high modelingResolution ensures that the attitude behavior is modeled properly at all times. The attitude behavior is defined by the respective mode. Here is a list of the supported modes with a brief explanation and references: nominalYawSteering : Yaw to keep solar panels aligned to Sun (e.g. most GNSS satellites outside eclipse) [1] orbitNormal : Keep fixed yaw angle, for example point X-axis in flight direction (e.g. BDS-2G, BDS-3G, QZS-2G) [1] catchUpYawSteering : Yaw at maximum yaw rate to catch up to nominal yaw angle (e.g. GPS-* (noon), GPS-IIR (midnight)) [2, 3] shadowMaxYawSteeringAndRecovery : Yaw at maximum yaw rate from shadow start to end, recover after shadow (e.g. GPS-IIA (midnight)) [2] shadowMaxYawSteeringAndStop : Yaw at maximum yaw rate from shadow start until nominal yaw angle at shadow end is reached, then stop (e.g. GLO-M (midnight)) [4] shadowConstantYawSteering : Yaw at constant yaw rate from shadow start to end (e.g. 
GPS-IIF (midnight)) [3] centeredMaxYawSteering : Yaw at maximum yaw rate centered around noon/midnight (e.g. QZS-2I, GLO-M (noon)) [4, 8] smoothedYawSteering1 : Yaw based on an auxiliary Sun vector for a smooth yaw maneuver (e.g. GAL-1) [5] smoothedYawSteering2 : Yaw based on a modified yaw-steering law for a smooth yaw maneuver (e.g. GAL-2, BDS-3M, BDS-3I) [5, 6] betaDependentOrbitNormal : Switch to orbit normal mode if below beta angle threshold (e.g. BDS-2M, BDS-2I, QZS-1) [7, 8] See GnssAttitudeInfoCreate for more details on which satellite uses which attitude modes and the required parameters for each mode. References for the attitude modes: https://www.gsc-europa.eu/support-to-developers/galileo-satellite-metadata#3 https://qzss.go.jp/en/technical/qzssinfo/index.html', 'config_table': 'outputfileStarCamera filename rotation from body frame to CRF inputfileOrbit filename attitude is modeled based on this orbit inputfileAttitudeInfo filename attitude modes used by the satellite and respective parameters interpolationDegree uint polynomial degree for orbit interpolation modelingResolution double [s] resolution for attitude model evaluation ephemerides ephemeridesType eclipse eclipseType model to determine if satellite is in Earth\'s shadow', 'display_text': 'This program simulates star camera measurements at each satellite position of inputfileOrbit. The resulting rotation matrices rotate from body frame to inertial frame. The body frame refers to the IGS-specific (not the manufacturer-specific) body frame, as described by Montenbruck et al. (2015). The inputfileOrbit must contain velocities (use OrbitAddVelocityAndAcceleration if needed).
Information about the attitude mode(s) used by the GNSS satellite may be provided via inputfileAttitudeInfo. This file can be created with GnssAttitudeInfoCreate. It contains one or more time-dependent entries, each defining the default attitude mode, the attitude modes used around orbit noon and midnight, and some parameters required by the various modes. If no inputfileAttitudeInfo is selected, the program defaults to a nominal yaw-steering attitude model. A sufficiently high modelingResolution ensures that the attitude behavior is modeled properly at all times.
The attitude behavior is defined by the respective mode. Here is a list of the supported modes with a brief explanation and references:
nominalYawSteering: Yaw to keep solar panels aligned to Sun (e.g. most GNSS satellites outside eclipse) [1]
orbitNormal: Keep fixed yaw angle, for example point X-axis in flight direction (e.g. BDS-2G, BDS-3G, QZS-2G) [1]
catchUpYawSteering: Yaw at maximum yaw rate to catch up to nominal yaw angle (e.g. GPS-* (noon), GPS-IIR (midnight)) [2, 3]
shadowMaxYawSteeringAndRecovery: Yaw at maximum yaw rate from shadow start to end, recover after shadow (e.g. GPS-IIA (midnight)) [2]
shadowMaxYawSteeringAndStop: Yaw at maximum yaw rate from shadow start until nominal yaw angle at shadow end is reached, then stop (e.g. GLO-M (midnight)) [4]
shadowConstantYawSteering: Yaw at constant yaw rate from shadow start to end (e.g. GPS-IIF (midnight)) [3]
centeredMaxYawSteering: Yaw at maximum yaw rate centered around noon/midnight (e.g. QZS-2I, GLO-M (noon)) [4, 8]
smoothedYawSteering1: Yaw based on an auxiliary Sun vector for a smooth yaw maneuver (e.g. GAL-1) [5]
smoothedYawSteering2: Yaw based on a modified yaw-steering law for a smooth yaw maneuver (e.g. GAL-2, BDS-3M, BDS-3I) [5, 6]
betaDependentOrbitNormal: Switch to orbit normal mode if below beta angle threshold (e.g. BDS-2M, BDS-2I, QZS-1) [7, 8]
See GnssAttitudeInfoCreate for more details on which satellite uses which attitude modes and the required parameters for each mode.
'},
'SimulateStarCameraGrace': { 'name': 'SimulateStarCameraGrace', 'key': 'SimulateStarCameraGrace', 'description': 'Simulates star camera data of the two GRACE satellites. x: the antenna center pointing to the other satellite. y: normal to line of sight and the radial direction. z: forms a right handed system.', 'config_table': 'outputfileStarCamera1 filename outputfileStarCamera2 filename inputfileOrbit1 filename position define the orientation of the satellite at each epoch inputfileOrbit2 filename position define the orientation of the satellite at each epoch antennaCenters choice KBR antenna phase center value sequence center1X double x-coordinate of antenna position in SRF [m] for GRACEA center1Y double y-coordinate of antenna position in SRF [m] for GRACEA center1Z double z-coordinate of antenna position in SRF [m] for GRACEA center2X double x-coordinate of antenna position in SRF [m] for GRACEB center2Y double y-coordinate of antenna position in SRF [m] for GRACEB center2Z double z-coordinate of antenna position in SRF [m] for GRACEB file sequence inputAntennaCenters filename', 'display_text': 'Simulates star camera data of the two GRACE satellites.
x: the antenna center pointing to the other satellite.
y: normal to line of sight and the radial direction.
z: forms a right handed system.
'},
'SimulateStarCameraSentinel1': { 'name': 'SimulateStarCameraSentinel1', 'key': 'SimulateStarCameraSentinel1', 'description': 'This program simulates star camera measurements at each satellite\'s position for the Sentinel 1A satellite. The This program simulates must contain positions and velocities (see OrbitAddVelocityAndAcceleration ). The resulting rotation matrices rotate from satellite frame to inertial frame.', 'config_table': 'outputfileStarCamera filename inputfileOrbit filename position and velocity defines the orientation of the satellite at each epoch', 'display_text': 'This program simulates star camera measurements at each satellite\'s position for the Sentinel 1A satellite. The inputfileOrbit must contain positions and velocities (see OrbitAddVelocityAndAcceleration). The resulting rotation matrices rotate from satellite frame to inertial frame.'},
'SimulateStarCameraTerrasar': { 'name': 'SimulateStarCameraTerrasar', 'key': 'SimulateStarCameraTerrasar', 'description': 'This program simulates This program simulates measurements at each satellite\'s position for the Terrasar satellite. The This program simulates must contain positions and velocities (see OrbitAddVelocityAndAcceleration ). The resulting rotation matrices rotate from satellite frame to inertial frame. H. Fiedler, E. Boerner, J. Mittermayer and G. Krieger, Total zero Doppler Steering-a new method for minimizing the Doppler centroid, in IEEE Geoscience and Remote Sensing Letters, vol. 2, no. 2, pp. 141-145, April 2005, https://www.doi.org/10.1109/LGRS.2005.844591 .', 'config_table': 'outputfileStarCamera filename rotation from satellite to inertial frame (x: along, y: cross, z: nadir) inputfileOrbit filename position and velocity defines the orientation of the satellite at each epoch', 'display_text': 'This program simulates outputfileStarCamera measurements at each satellite\'s position for the Terrasar satellite. The inputfileOrbit must contain positions and velocities (see OrbitAddVelocityAndAcceleration). The resulting rotation matrices rotate from satellite frame to inertial frame.
H. Fiedler, E. Boerner, J. Mittermayer and G. Krieger, Total zero Doppler Steering-a new method for minimizing the Doppler centroid, in IEEE Geoscience and Remote Sensing Letters, vol. 2, no. 2, pp. 141-145, April 2005, https://www.doi.org/10.1109/LGRS.2005.844591.'},
'SlrProcessing': { 'name': 'SlrProcessing', 'key': 'SlrProcessing', 'description': 'This program processes SLR normal point or full rate observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. To calculate observation equations from the passes, the model parameters or unknown parameters need to be defined beforehand. These unknown parameters can be chosen arbitrarily by the user with an adequate list of defined This program processes SLR normal point or full rate observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. . Some of the This program processes SLR normal point or full rate observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. also include a priori models. Lastly it is required to define the process flow of the SLR processing. This is accomplished with a list of This program processes SLR normal point or full rate observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. . Each step is processed consecutively. Some steps allow the selection of parameters, station, or satellites, which affects all subsequent steps. The This program processes SLR normal point or full rate observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it. is used to precompute Earth rotation and station displacements with a uniform sampling. In a second step these values are interpolated to the observation epochs. A sampling of about 10 minutes should be adequate. 
It should be noted that GROOPS uses GPS time format, but normal point/full rate data files and CPF files, provided by ILRS data centers are given in UTC time format.', 'config_table': 'timeSeries timeSeriesType defines station movements and earth rotation epochs satellite slrSatelliteGeneratorType satellites station slrStationGeneratorType ground station network earthRotation earthRotationType apriori earth rotation parametrization slrParametrizationType models and parameters processingStep slrProcessingStepType steps are processed consecutively', 'display_text': 'This program processes SLR normal point or full rate observations. It calculates the linearized observation equations, accumulates them into a system of normal equations and solves it.
To calculate observation equations from the passes, the model parameters or unknown parameters need to be defined beforehand. These unknown parameters can be chosen arbitrarily by the user with an adequate list of defined parametrization. Some of the parametrization also include a priori models.
Lastly it is required to define the process flow of the SLR processing. This is accomplished with a list of processingSteps. Each step is processed consecutively. Some steps allow the selection of parameters, station, or satellites, which affects all subsequent steps.
The timeSeries is used to precompute Earth rotation and station displacements with a uniform sampling. In a second step these values are interpolated to the observation epochs. A sampling of about 10 minutes should be adequate.
It should be noted that GROOPS uses GPS time format, but normal point/full rate data files and CPF files, provided by ILRS data centers are given in UTC time format.'},
'FileConvert': { 'name': 'FileConvert', 'key': 'FileConvert', 'description': 'Converts GROOPS file between different file formats (ASCII, XML, JSON, binary), see file formats for details. With an additional extension of \'.gz\' files are directly compressed and uncompressed. It prints also some information about the content. Therefore it can be used to get an idea about the content of binary files.', 'config_table': 'outputfile filename GROOPS formats: .xml, .txt, .json, .dat (optional with additional .gz) inputfile filename GROOPS formats: .xml, .txt, .json, .dat (optional with additional .gz)', 'display_text': 'Converts GROOPS file between different file formats (ASCII, XML, JSON, binary), see file formats for details. With an additional extension of \'.gz\' files are directly compressed and uncompressed. It prints also some information about the content. Therefore it can be used to get an idea about the content of binary files.'},
'FileCreateDirectories': { 'name': 'FileCreateDirectories', 'key': 'FileCreateDirectories', 'description': 'Creates the directory and parent directories as needed.', 'config_table': 'directory filename', 'display_text': 'Creates the directory and parent directories as needed.'},
'FileMove': { 'name': 'FileMove', 'key': 'FileMove', 'description': 'Move/rename file or directory. If the outputfile is an existing directory the inputfile is moved into it.', 'config_table': 'outputfile filename target name or directory for the move/rename inputfile filename', 'display_text': 'Move/rename file or directory. If the outputfile is an existing directory the inputfile is moved into it.'},
'FileRemove': { 'name': 'FileRemove', 'key': 'FileRemove', 'description': 'Remove files or directories. Deletes also the content recursively if one of files is a directory.', 'config_table': 'files filename', 'display_text': 'Remove files or directories. Deletes also the content recursively if one of files is a directory.'},
'FileTextCreate': { 'name': 'FileTextCreate', 'key': 'FileTextCreate', 'description': 'Create text outputfile containing line s. This program can be a powerful tool, if the line is repeated with a Create text together with the text parser .', 'config_table': 'outputfile filename line string', 'display_text': 'Create text outputfile containing lines. This program can be a powerful tool, if the line is repeated with a loop together with the text parser.'},
'GroupPrograms': { 'name': 'GroupPrograms', 'key': 'GroupPrograms', 'description': 'Runs program s in a group, which can be used to structure a config file. If catchErrors is enabled and an error occurs, the remaining program s are skipped and execution continues with errorProgram s, in case any are defined. Otherwise an exception is thrown. The silently option disables the screen output of the program s. With outputfileLog a log file is written for this group additional to a global log file. This might be helpful within LoopPrograms with parallel iterations.', 'config_table': 'outputfileLog filename additional log file silently boolean without showing the output. program programType catchErrors sequence errorProgram programType executed if an error occurred', 'display_text': 'Runs programs in a group, which can be used to structure a config file. If catchErrors is enabled and an error occurs, the remaining programs are skipped and execution continues with errorPrograms, in case any are defined. Otherwise an exception is thrown.
The silently option disables the screen output of the programs. With outputfileLog a log file is written for this group additional to a global log file. This might be helpful within LoopPrograms with parallel iterations.'},
'IfPrograms': { 'name': 'IfPrograms', 'key': 'IfPrograms', 'description': 'Runs a list of program s if a Runs a list of is met. Otherwise elseProgram s are executed.', 'config_table': 'condition conditionType program programType executed if condition evaluates to true elseProgram programType executed if condition evaluates to false', 'display_text': 'Runs a list of programs if a condition is met. Otherwise elsePrograms are executed.'},
'LoopPrograms': { 'name': 'LoopPrograms', 'key': 'LoopPrograms', 'description': 'This program runs a list of programs in a This program runs a list of programs in a . If continueAfterError = yes and an error occurs, the remaining programs in the current iteration are skipped and the loop continues with the next iteration. Otherwise an exception is thrown. If this program is executed on multiple processing nodes, the iterations can be computed in parallel, see parallelization . The first process serves as load balancer and the other processes are assigned to iterations according to processCountPerIteration . For example, running a loop containing three iterations on 13 processes with processCountPerIteration = 4 , runs the three iterations in parallel, with each iteration being assigned four processes. With parallelLog = yes all processes write output to screen and the log file. As the output can be quite confusing in this case, running GroupPrograms with an extra outputfileLog for each iteration (use the loop variables for the name of the log files) might be helpful.', 'config_table': 'loop loopType subprograms are called for every iteration continueAfterError boolean continue with next iteration after error, otherwise throw exception processCountPerIteration uint 0: use all processes for each iteration parallelLog boolean write to screen/log file from all processing nodes in parallelized loops program programType', 'display_text': 'This program runs a list of programs in a loop.
If continueAfterError=yes and an error occurs, the remaining programs in the current iteration are skipped and the loop continues with the next iteration. Otherwise an exception is thrown.
If this program is executed on multiple processing nodes, the iterations can be computed in parallel, see parallelization. The first process serves as load balancer and the other processes are assigned to iterations according to processCountPerIteration. For example, running a loop containing three iterations on 13 processes with processCountPerIteration=4, runs the three iterations in parallel, with each iteration being assigned four processes. With parallelLog=yes all processes write output to screen and the log file. As the output can be quite confusing in this case, running GroupPrograms with an extra outputfileLog for each iteration (use the loop variables for the name of the log files) might be helpful.'},
'RunCommand': { 'name': 'RunCommand', 'key': 'RunCommand', 'description': 'Execute system command s. If executeParallel is set and multiple command s are given they are executed in parallel at distributed nodes, otherwise they are executed consecutively at master node only.', 'config_table': 'command filename silently boolean without showing the output. continueAfterError boolean continue with next command after error, otherwise throw exception executeParallel boolean execute several commands in parallel', 'display_text': 'Execute system commands. If executeParallel is set and multiple commands are given they are executed in parallel at distributed nodes, otherwise they are executed consecutively at master node only.'},
'Accelerometer2GraceL1b': { 'name': 'Accelerometer2GraceL1b', 'key': 'Accelerometer2GraceL1b', 'description': 'This program converts accelerometer data from the instrument file (ACCELEROMETER) format into GRACE SDS format. The text file inputfileHeader is placed at the beginning of the outputfile . The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin} , {epochmax} , and {epochcount} . See also GraceL1b2Accelerometer .', 'config_table': 'outputfile filename ACT1B inputfileHeader filename YAML Header, {epochmin}, {epochmax}, {epochcount} available inputfileAccelerometer filename ACCELEROMETER inputfileAngularAccelerometer filename ACCELEROMETER inputfileFlags filename MISCVALUES(qualflg, acl_res.x, acl_res.y, acl_res.z) satelliteId string A, B, C or D', 'display_text': 'This program converts accelerometer data from the instrument file (ACCELEROMETER) format into GRACE SDS format.
The text file inputfileHeader is placed at the beginning of the outputfile. The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin}, {epochmax}, and {epochcount}.
See also GraceL1b2Accelerometer.'},
'BerneseKinematic2Orbit': { 'name': 'BerneseKinematic2Orbit', 'key': 'BerneseKinematic2Orbit', 'description': 'Read kinematic orbits in Bernese format.', 'config_table': 'outputfileOrbit filename outputfileCovariance filename earthRotation earthRotationType from TRF to CRF inputfile filename', 'display_text': 'Read kinematic orbits in Bernese format.'},
'Champ2AccStar': { 'name': 'Champ2AccStar', 'key': 'Champ2AccStar', 'description': 'This program reads in CHAMP accelerometer and star camera data given in the special CHAMP format. In case of CHAMP accelerometer and star camera data is both stored in one file. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-001.pdf .', 'config_table': 'outputfileAccelerometer filename outputfileAngularAcceleration filename outputfileStarCamera filename inputfile filename', 'display_text': 'This program reads in CHAMP accelerometer and star camera data given in the special CHAMP format. In case of CHAMP accelerometer and star camera data is both stored in one file. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-001.pdf.'},
'Champ2Orbit': { 'name': 'Champ2Orbit', 'key': 'Champ2Orbit', 'description': 'This program reads in CHAMP precise science orbits in the special CHORB format. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-002.pdf', 'config_table': 'outputfileOrbit filename earthRotation earthRotationType inputfile filename', 'display_text': 'This program reads in CHAMP precise science orbits in the special CHORB format. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-002.pdf'},
'Cosmic2OrbitStar': { 'name': 'Cosmic2OrbitStar', 'key': 'Cosmic2OrbitStar', 'description': 'This program reads in cosmic orbit and star camera data given in the CHAMP format. In case of cosmic orbit and star camera data is stored in one file. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-001.pdf', 'config_table': 'outputfileOrbit filename outputfileStarCamera filename inputfile filename', 'display_text': 'This program reads in cosmic orbit and star camera data given in the CHAMP format. In case of cosmic orbit and star camera data is stored in one file. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-001.pdf'},
'Cpf2Orbit': { 'name': 'Cpf2Orbit', 'key': 'Cpf2Orbit', 'description': 'Converts and writes an instrument file (ORBIT) . The time format of the CPF file is UTC. The coordinate system used in the CPF format is usually represented in TRF. If Converts is provided the data are transformed from terrestrial (TRF) to celestial reference frame (CRF). See also Orbit2Cpf', 'config_table': 'outputfileOrbit filename earthRotation earthRotationType inputfile filename SLR CPF file', 'display_text': 'Converts CPF file and writes an instrument file (ORBIT).
The time format of the CPF file is UTC. The coordinate system used in the CPF format is usually represented in TRF. If earthRotation is provided the data are transformed from terrestrial (TRF) to celestial reference frame (CRF).
See also Orbit2Cpf'},
'Crd2NormalPoints': { 'name': 'Crd2NormalPoints', 'key': 'Crd2NormalPoints', 'description': 'Converts and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including normal point data like range, accuracy, redundancy, wavelength and window size.', 'config_table': 'outputfileNormalPoints filename variable {station} available outputfileMeteorological filename variable {station} available inputfileSlrData filename SLR CRD files', 'display_text': 'Converts CRD file and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including normal point data like range, accuracy, redundancy, wavelength and window size.'},
'Cstg2NormalPoints': { 'name': 'Cstg2NormalPoints', 'key': 'Cstg2NormalPoints', 'description': 'Converts provided by the and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including normal point data like range, accuracy, redundancy, wavelength and window size.', 'config_table': 'outputfileNormalPoints filename variable {station} available outputfileMeteorological filename variable {station} available inputfileSlrData filename SLR CSTG file', 'display_text': 'Converts CSTG file provided by the ILRS and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including normal point data like range, accuracy, redundancy, wavelength and window size.'},
'DoodsonAdmittance2SupplementaryFiles': { 'name': 'DoodsonAdmittance2SupplementaryFiles', 'key': 'DoodsonAdmittance2SupplementaryFiles', 'description': 'The publication of an ocean tide model includes not only the atlas in the form of spherical harmonics coefficients, but also the matrix of Doodson multipliers ( outputfileDoodsonMatrix ) and the outputfileAdmittanceMatrix . The outputfileMajorTideList contains the fileNames for each constituent. The required information is taken from the The publication of an ocean tide model includes not only the atlas in the form of spherical harmonics coefficients, but also the matrix of Doodson multipliers ( . See also DoodsonHarmonics2PotentialCoefficients .', 'config_table': 'outputfileMajorTideList filename fileNames filename template for fileList, variables: doodson, name, cossin outputfileDoodsonMatrix filename outputfileAdmittanceMatrix filename inputfileAdmittance filename interpolation of minor constituents', 'display_text': 'The publication of an ocean tide model includes not only the atlas in the form of spherical harmonics coefficients, but also the matrix of Doodson multipliers (outputfileDoodsonMatrix) and the outputfileAdmittanceMatrix.
The outputfileMajorTideList contains the fileNames for each constituent. The required information is taken from the inputfileAdmittance.
See also DoodsonHarmonics2PotentialCoefficients.'},
'DoodsonHarmonics2IersPotential': { 'name': 'DoodsonHarmonics2IersPotential', 'key': 'DoodsonHarmonics2IersPotential', 'description': 'Convert doodson harmonics to IERS conventions according to FES2004. cf. ftp://tai.bipm.org/iers/conv2010/chapter6/tidemodels/fes2004.dat .', 'config_table': 'outputfile filename according to IERS2010, chapter 6.3.2, footnote 7 inputfileDoodsonHarmoncis filename header string info for output header factor double minDegree uint maxDegree uint', 'display_text': 'Convert doodson harmonics to IERS conventions according to FES2004. cf. ftp://tai.bipm.org/iers/conv2010/chapter6/tidemodels/fes2004.dat.'},
'DoodsonHarmonics2IersWaterHeight': { 'name': 'DoodsonHarmonics2IersWaterHeight', 'key': 'DoodsonHarmonics2IersWaterHeight', 'description': 'Convert doodson harmonics to IERS conventions according to FES2004. cf. ftp://tai.bipm.org/iers/conv2010/chapter6/tidemodels/fes2004.dat .', 'config_table': 'outputfile filename according to IERS2010, chapter 6.3.2, footnote 7 inputfileDoodsonHarmoncis filename inputfileTideGeneratingPotential filename to compute Xi phase correction header string info for output header kernel kernelType data type of output values factor double e.g. from [m] to [cm] minDegree uint maxDegree uint', 'display_text': 'Convert doodson harmonics to IERS conventions according to FES2004. cf. ftp://tai.bipm.org/iers/conv2010/chapter6/tidemodels/fes2004.dat.'},
'GnssAntex2AntennaDefinition': { 'name': 'GnssAntex2AntennaDefinition', 'key': 'GnssAntex2AntennaDefinition', 'description': 'Converts metadata and antenna definitions from the . to Converts metadata and antenna definitions from the , Converts metadata and antenna definitions from the , and Converts metadata and antenna definitions from the files for the respective GNSS and for the list of ground station antennas. The transmitterInfo files for GLONASS satellites should then be updated using GnssGlonassFrequencyNumberUpdate .', 'config_table': 'outputfileAntennaDefinitionStation filename antenna center variations outputfileAntennaDefinitionTransmitter filename antenna center variations outputfileTransmitterInfo filename PRN is appended to file name outputfileTransmitterListGps filename list of PRNs outputfileTransmitterListGlonass filename list of PRNs outputfileTransmitterListGalileo filename list of PRNs outputfileTransmitterListBeiDou filename list of PRNs outputfileTransmitterListQzss filename list of PRNs outputfileTransmitterListIrnss filename list of PRNs inputfileAntex filename timeStart time ignore older antenna definitions createZeroModel boolean create empty antenna patterns', 'display_text': 'Converts metadata and antenna definitions from the IGS ANTEX format. to antennaDefinition, transmitterInfo, and transmitterList files for the respective GNSS and for the list of ground station antennas.
The transmitterInfo files for GLONASS satellites should then be updated using GnssGlonassFrequencyNumberUpdate.'},
'GnssClock2ClockRinex': { 'name': 'GnssClock2ClockRinex', 'key': 'GnssClock2ClockRinex', 'description': 'Converts GNSS clocks from GROOPS format to . Clocks can be provided via satelliteData and/or stationData . Observed signal types are inferred from Converts GNSS clocks from GROOPS format to . Satellites/stations used as clock references can be provided via referenceClock . See IGS clock RINEX format description for further details on header information.', 'config_table': 'outputfileClockRinex filename satelliteData sequence one element per satellite inputfileClock filename clock instrument file inputfileSignalBias filename signal bias file identifier string PRN (e.g. G23) stationData sequence one element per station inputfileClock filename clock instrument file inputfilePosition filename station position file inputfileStationInfo filename station info file identifier string station name (e.g. wtzz) comment string comment in header program string name of program (for first line) institution string name of agency (for first line) analysisCenter string name of analysis center differentialCodeBias string program and source for applied differential code bias phaseCenterVariations string program and source for applied phase center variations referenceClock string identifier of reference satellite/station referenceFrame string terrestrial reference frame for the stations', 'display_text': 'Converts GNSS clocks from GROOPS format to IGS clock RINEX format. Clocks can be provided via satelliteData and/or stationData. Observed signal types are inferred from inputfileSignalBias. Satellites/stations used as clock references can be provided via referenceClock.
See IGS clock RINEX format description for further details on header information.'},
'GnssClockRinex2InstrumentClock': { 'name': 'GnssClockRinex2InstrumentClock', 'key': 'GnssClockRinex2InstrumentClock', 'description': 'This program converts clocks from the , which contains the clocks of all satellites and stations in a single file, into an instrument file (MISCVALUE) for each identifier (satellite and/or station).', 'config_table': 'outputfileInstrument filename identifier is appended to each file inputfileClockRinex filename identifier string satellite or station identifier, e.g. G23 or alic intervals timeSeriesType minEpochsPerInterval uint minimum number of epochs in an interval', 'display_text': 'This program converts clocks from the IGS clock RINEX format, which contains the clocks of all satellites and stations in a single file, into an instrument file (MISCVALUE) for each identifier (satellite and/or station).'},
'GnssEop2IgsErp': { 'name': 'GnssEop2IgsErp', 'key': 'GnssEop2IgsErp', 'description': 'Write GNSS Earth orientation parameters to . Requires polar motion, polar motion rate, dUT1 and LOD parameters in the solution vector Write GNSS Earth orientation parameters to and their sigmas in Write GNSS Earth orientation parameters to . Solution usually comes out of GnssProcessing .', 'config_table': 'outputfileIgsErp filename IGS ERP file epoch sequence e.g. daily solution inputfileSolution filename parameter vector inputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) inputfileParameterNames filename parameter names inputfileTransmitterList filename transmitter PRNs used in solution (used for transmitter count) inputfileStationList filename stations used in solution (used for station count) time time reference time for epoch comment string', 'display_text': 'Write GNSS Earth orientation parameters to IGS ERP file format.
Requires polar motion, polar motion rate, dUT1 and LOD parameters in the solution vector inputfileSolution and their sigmas in inputfileSigmax. Solution usually comes out of GnssProcessing.'},
'GnssGriddedDataTimeSeries2Ionex': { 'name': 'GnssGriddedDataTimeSeries2Ionex', 'key': 'GnssGriddedDataTimeSeries2Ionex', 'description': 'Converts TEC maps from GROOPS gridded data time series format to IGS . Currently only supports 2D TEC maps. See also GnssIonex2GriddedDataTimeSeries , Converts TEC maps from GROOPS .', 'config_table': 'outputfileIonex filename inputfileGriddedDataTimeSeries filename must contain regular grid value expression expression (e.g. data column) timeSeries timeSeriesType (empty = use input file time series) program string name of program (for first line) institution string name of agency (for first line) description string description in header comment string comment in header mappingFunction string see IONEX documentation elevationCutoff double see IONEX documentation (0 if unknown) observablesUsed string see IONEX documentation exponent int factor 10^exponent is applied to all values', 'display_text': 'Converts TEC maps from GROOPS gridded data time series format to IGS IONEX file format.
Currently only supports 2D TEC maps.
See also GnssIonex2GriddedDataTimeSeries, IonosphereMap.'},
'GnssIonex2GriddedDataTimeSeries': { 'name': 'GnssIonex2GriddedDataTimeSeries', 'key': 'GnssIonex2GriddedDataTimeSeries', 'description': 'Converts TEC maps from IGS to GROOPS gridded data time series format. Currently only supports 2D TEC maps. See also GnssGriddedDataTimeSeries2Ionex , Converts TEC maps from IGS .', 'config_table': 'outputfileGriddedDataTimeSeries filename inputfileIonex filename', 'display_text': 'Converts TEC maps from IGS IONEX file format to GROOPS gridded data time series format.
Currently only supports 2D TEC maps.
See also GnssGriddedDataTimeSeries2Ionex, IonosphereMap.'},
'GnssNormals2Sinex': { 'name': 'GnssNormals2Sinex', 'key': 'GnssNormals2Sinex', 'description': 'Write GNSS data/metadata and normal equations to . Normal equations usually come from GnssProcessing (e.g. from GNSS satellite orbit determination and station network analysis ). Metadata input files include Write GNSS data/metadata and , Write GNSS data/metadata and , and Write GNSS data/metadata and , see GnssAntex2AntennaDefinition . See also Sinex2Normals and NormalsSphericalHarmonics2Sinex .', 'config_table': 'outputfileSinexNormals filename full SINEX file including normal equations outputfileSinexCoordinates filename SINEX file without normal equations (station coordinates file) inputfileNormals filename normal equation matrix inputfileSolution filename parameter vector inputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) inputfileApriori filename apriori parameter vector inputfileAprioriSigma filename constraint sigmas for apriori parameter vector inputfileAprioriMatrix filename normal equation matrix of applied constraints transmitterConstellation sequence transmitter constellation metadata inputfileTransmitterList filename transmitter PRNs used in solution inputfileTransmitterInfo filename transmitter info file template inputfileAntennaDefinition filename transmitter phase centers and variations (ANTEX) variablePrn string loop variable for PRNs from transmitter list stations sequence inputfileStationList filename stations contained in normal equations inputfileStationInfo filename station info file template inputfileAntennaDefinition filename station phase centers and variations (ANTEX) variableStationName string loop variable for station names from station list observationTimeStart time start time for which solution has observations observationTimeEnd time end time for which solution has observations time time reference time for parameters sampling double [seconds] observation sampling 
antennaCalibrationModel string e.g. IGS14_WWWW (WWWW = ANTEX release GPS week) sinexHeader sequence agencyCode string identify the agency providing the data timeStart time start time of the data timeEnd time end time of the data observationCode string technique used to generate the SINEX solution constraintCode string 0: tight constraint, 1: siginficant constraint, 2: unconstrained solutionContent string solution types contained in the SINEX solution (S O E T C A) description string organizitions gathering/alerting the file contents contact string Address of the relevant contact. e-mail output string Description of the file contents input string Brief description of the input used to generate this solution software string Software used to generate the file hardware string Computer hardware on which above software was run inputfileComment filename comments in the comment block from a file (truncated at 80 characters) comment string comments in the comment block', 'display_text': 'Write GNSS data/metadata and normal equations to SINEX format.
See also Sinex2Normals and NormalsSphericalHarmonics2Sinex.'},
'GnssOrbex2StarCamera': { 'name': 'GnssOrbex2StarCamera', 'key': 'GnssOrbex2StarCamera', 'description': 'Converts GNSS satellite attitude from (quaternions) to instrument file (STARCAMERA) . The resulting star camera files contain the rotation from satellite body frame to TRF, or to CRF in case Converts GNSS satellite attitude from is provided. See also GnssAttitude2Orbex .', 'config_table': 'outputfileStarCamera filename rotation from body frame to TRF/CRF, identifier is appended to each file inputfileOrbex filename identifier string (empty = all) satellite identifier, e.g. G23 or E05 earthRotation earthRotationType rotation from TRF to CRF', 'display_text': 'Converts GNSS satellite attitude from ORBEX file format (quaternions) to instrument file (STARCAMERA). The resulting star camera files contain the rotation from satellite body frame to TRF, or to CRF in case earthRotation is provided.
See also GnssAttitude2Orbex.'},
'GnssReceiver2RinexObservation': { 'name': 'GnssReceiver2RinexObservation', 'key': 'GnssReceiver2RinexObservation', 'description': 'Converts a Converts a into a observation file. The Converts a contains the antenna and receiver information for the RINEX header. The Converts a and Converts a can be used to filter the observation types that will be exported.', 'config_table': 'outputfileRinexObservation filename RINEX observation file inputfileGnssReceiver filename GNSS instrument file inputfileStationInfo filename antenna and receiver info comment string write comments at begin of header observer string header information angency string header information useType gnssType only use observations that match any of these patterns ignoreType gnssType ignore observations that match any of these patterns', 'display_text': 'Converts a inputfileGnssReceiver into a RINEX observation file. The inputfileStationInfo contains the antenna and receiver information for the RINEX header. The useType and ignoreType can be used to filter the observation types that will be exported.'},
'GnssRinexNavigation2OrbitClock': { 'name': 'GnssRinexNavigation2OrbitClock', 'key': 'GnssRinexNavigation2OrbitClock', 'description': 'Evaluates orbit and clock parameters from (version 2, 3, and 4) navigation file inputfileRinex at epochs given by Evaluates orbit and clock parameters from and writes them to Evaluates orbit and clock parameters from and Evaluates orbit and clock parameters from , respectively. Orbits are rotated from TRF (as broadcasted) to CRF via Evaluates orbit and clock parameters from , but system-specific TRFs (WGS84, PZ-90, etc.) are not aligned to a common TRF. Furthermore, option is available to remove any satellite ephemeris data that has their satellite flag set to unhealthy. See also OrbitAddVelocityAndAcceleration .', 'config_table': 'outputfileOrbit filename PRN is appended to file name outputfileClock filename PRN is appended to file name inputfileRinex filename RINEX navigation file timeSeries timeSeriesType orbit and clock evaluation epochs earthRotation earthRotationType for rotation from TRF to CRF useType gnssType (e.g. ***G12) only use satellites with PRN that match any of these patterns ignoreType gnssType (e.g. ***R**) ignore satellites PRN that match any of these patterns removeUnhealthySatellites boolean Remove satellite ephemeris that have their sat flags set to unhealthy', 'display_text': 'Evaluates orbit and clock parameters from RINEX (version 2, 3, and 4) navigation file inputfileRinex at epochs given by timeSeries and writes them to outputfileOrbit and outputfileClock, respectively.
Orbits are rotated from TRF (as broadcasted) to CRF via earthRotation, but system-specific TRFs (WGS84, PZ-90, etc.) are not aligned to a common TRF.
Furthermore, an option is available to remove any satellite ephemeris data that has its satellite flag set to unhealthy.
See also OrbitAddVelocityAndAcceleration.'},
'GnssSignalBias2SinexBias': { 'name': 'GnssSignalBias2SinexBias', 'key': 'GnssSignalBias2SinexBias', 'description': 'Convert GNSS signal biases from GROOPS format to . Biases can be provided via transmitterBiases and/or receiverBiases . Phase biases without attribute (e.g. L1* ) are automatically expanded so each code bias has a corresponding phase bias (Example: C1C , C1W , L1* are converted to C1C , C1W , L1C , L1W ). Time-variable biases (e.g. GPS L5 satellite phase bias) can be provided via timeVariableBias . Their time span will be based on the provided epochs ( ). The slope of the bias can be optionally provided in the second data column. If GLONASS receiver biases depend on frequency number, those must be defined in Convert to get the correct PRN/SVN assignment to the biases. See IGS SINEX Bias format description for further details on header information. See also GnssSinexBias2SignalBias and GnssBiasClockAlignment .', 'config_table': 'outputfileSinexBias filename inputfileTransmitterInfo filename one file per satellite transmitterBiases sequence one element per satellite inputfileSignalBias filename signal bias file timeVariableBias sequence one entry per time variable bias type inputfileSignalBias filename columns: mjd, bias [m], (biasSlope [m/s]) type gnssType bias type identifier string PRN or station name (e.g. G23 or wtzz) receiverBiases sequence one element per station inputfileSignalBias filename signal bias file timeVariableBias sequence one entry per time variable bias type inputfileSignalBias filename columns: mjd, bias [m], (biasSlope [m/s]) type gnssType bias type identifier string PRN or station name (e.g. 
G23 or wtzz) agencyCode string identify the agency providing the data fileAgencyCode string identify the agency creating the file timeStart time start time of the data timeEnd time end time of the data biasMode choice absolute or relative bias estimates absolute relative observationSampling double [seconds] intervalLength double [seconds] interval for bias parameter representation determinationMethod string determination method used to generate the bias results (see SINEX Bias format description) receiverClockReferenceGnss string (G, R, E, C) reference GNSS used for receiver clock estimation satelliteClockReferenceObservables string one per system, reference code observable on first and second frequency (RINEX3 format) description string organization gathering/altering the file contents contact string contact name and/or email address input string brief description of the input used to generate this solution output string description of the file contents software string software used to generate the file hardware string computer hardware on which above software was run comment string comments in the comment block', 'display_text': 'Convert GNSS signal biases from GROOPS format to IGS SINEX Bias format. Biases can be provided via transmitterBiases and/or receiverBiases. Phase biases without attribute (e.g. L1*) are automatically expanded so each code bias has a corresponding phase bias (Example: C1C, C1W, L1* are converted to C1C, C1W, L1C, L1W).
Time-variable biases (e.g. GPS L5 satellite phase bias) can be provided via timeVariableBias. Their time span will be based on the provided epochs ($t \\pm \\Delta t / 2$). The slope of the bias can be optionally provided in the second data column.
If GLONASS receiver biases depend on frequency number, those must be defined in inputfileTransmitterInfo to get the correct PRN/SVN assignment to the biases.
See IGS SINEX Bias format description for further details on header information.
See also GnssSinexBias2SignalBias and GnssBiasClockAlignment.'},
'GnssSinexBias2SignalBias': { 'name': 'GnssSinexBias2SignalBias', 'key': 'GnssSinexBias2SignalBias', 'description': 'Converts GNSS signal biases from to GnssSignalBias format . Only satellite observable-specific signal biases (OSB) are supported at the moment. If multiple entries exist for the same bias, the weighted average (based on time span) of all entries is used. Time-variable biases are not supported at the moment. See also GnssSignalBias2SinexBias .', 'config_table': 'outputfileSignalBias filename identifier is appended to file name inputfileSinexBias filename inputfileGlonassSignalDefinition filename GLONASS frequency number mapping identifier string (empty = all) satellite PRN, e.g. G23 or E05', 'display_text': 'Converts GNSS signal biases from IGS SINEX Bias format to GnssSignalBias format.
Only satellite observable-specific signal biases (OSB) are supported at the moment. If multiple entries exist for the same bias, the weighted average (based on time span) of all entries is used. Time-variable biases are not supported at the moment.
See also GnssSignalBias2SinexBias.'},
'GnssStationLog2Platform': { 'name': 'GnssStationLog2Platform', 'key': 'GnssStationLog2Platform', 'description': 'Converts or to Converts . If Converts is provided, station log data is cross-checked with the given antenna definitions. Cross-checking station log data with a is possible with CheckStationsPlatformsWithSinex .', 'config_table': 'outputfileStationPlatform filename inputfileStationLog filename inputfileAntennaDefinition filename used to check antennas', 'display_text': 'Converts IGS station log format or IGS station log format v2.0 to outputfileStationPlatform.
If inputfileAntennaDefinition is provided, station log data is cross-checked with the given antenna definitions. Cross-checking station log data with a SINEX file is possible with CheckStationsPlatformsWithSinex.'},
'GnssTroposphere2TropoSinex': { 'name': 'GnssTroposphere2TropoSinex', 'key': 'GnssTroposphere2TropoSinex', 'description': 'Convert GNSS troposphere data from GROOPS format as estimated by GnssProcessing to format. For each station folling files are needed: Convert GNSS troposphere data from GROOPS format as estimated by , Convert GNSS troposphere data from GROOPS format as estimated by (MISVALUES), optional standard deviations with Convert GNSS troposphere data from GROOPS format as estimated by (MISVALUES), Convert GNSS troposphere data from GROOPS format as estimated by (VECTOR3D). The Convert GNSS troposphere data from GROOPS format as estimated by contains antenna center offsets and variations of all used antennas. Created via GnssAntex2AntennaDefinition or GnssAntennaDefinitionCreate . For considering the geoid height use Convert GNSS troposphere data from GROOPS format as estimated by as it might be computed by Gravityfield2GriddedData . The height closest to the station\'s position is used in each case. 
See also GnssProcessing .', 'config_table': 'outputfileTropoSinex filename station sequence inputfileStationInfo filename platform file inputfileTroposphereData filename Troposphere data estimates (columns: mjd, trodry, trowet, tgndry, tgnwet, tgedry, tgewet) inputfileTroposphereSigmas filename Troposphere data sigmas (columns: mjd, sigma_trowet, sigma_tgnwet, sigma_tgewet) inputfilePosition filename Precise station position (columns: mjd, x, y, z [m in TRF]) inputfileAntennaDefinition filename station phase centers and variations inputfileGriddedGeoidHeight filename value closest to the station\'s position is used in each case dataSamplingInterval double [sec] GNSS data sampling rate tropoSamplingInterval double [sec] Tropospheric parameter sampling interval tropoModelingMethod string Tropospheric estimation method: Filter, Smoother, Least Squares, Piece-Wise Linear Interpolation aPrioriTropoModel string A priori tropospheric model used tropoMappingFunction string Name of mapping function used for hydrostatic and wet delay gradientMappingFunction string Name of mapping function used for gradients metDataSource string source of surface meteorological observations used (see format desc.) observationWeighting string observation weighting model applied elevationCutoff double [deg] gnssSystems string G=GPS, R=GLONASS, E=Galileo, C=BeiDou timeSystem string G (GPS) or UTC oceanTideModel string Name of applied Ocean tide loading model atmosphericTideModel string Name of applied Atmospheric tide loading model geoidModel string Geoid model name for undulation values systemCode string Terrestrial reference system code remark string Remark used to identify the origin of the coordinates (AC acronym) antennaCalibrationModel string e.g. 
IGS20_WWWW (WWWW = ANTEX release GPS week) sinexTroHeader sequence agencyCode string Identify the agency providing the data timeStart time Start time of the data timeEnd time End time of the data observationCode string Technique used to generate the SINEX solution solutionContents string Marker name if single station, MIX if multiple stations description string Organizitions gathering/alerting the file contents output string Description of the file contents contact string Address of the relevant contact e-mail software string Software used to generate the file hardware string Computer hardware on which above software was run input string Brief description of the input used to generate this solution versionNumber string Unique identifier of the product, same as in file name, e.g. 000 inputfileComment filename comments in the comment block from a file (truncated at 80 characters per line) comment string comments in the comment block', 'display_text': 'Convert GNSS troposphere data from GROOPS format as estimated by GnssProcessing to IGS SINEX TRO format.
See also GnssProcessing.'},
'GoceXml2Gradiometer': { 'name': 'GoceXml2Gradiometer', 'key': 'GoceXml2Gradiometer', 'description': 'Read ESA XML GOCE Data. The outputfileGradiometer is written as instrument file (GRADIOMETER) .', 'config_table': 'outputfileGradiometer filename inputfile filename', 'display_text': 'Read ESA XML GOCE Data. The outputfileGradiometer is written as instrument file (GRADIOMETER).'},
'GoceXml2Orbit': { 'name': 'GoceXml2Orbit', 'key': 'GoceXml2Orbit', 'description': 'Read ESA XML GOCE Data.', 'config_table': 'outputfileOrbit filename earthRotation earthRotationType rotation from TRF to CRF inputfile filename', 'display_text': 'Read ESA XML GOCE Data.'},
'GoceXml2StarCamera': { 'name': 'GoceXml2StarCamera', 'key': 'GoceXml2StarCamera', 'description': 'Read ESA XML GOCE Data.', 'config_table': 'outputfileStarCamera filename inputfile filename', 'display_text': 'Read ESA XML GOCE Data.'},
'GoceXmlEggNom1b': { 'name': 'GoceXmlEggNom1b', 'key': 'GoceXmlEggNom1b', 'description': 'Read ESA XML GOCE Data.', 'config_table': 'outputfileGradiometer filename outputfileAccelerometer filename outputfileStarCamera filename outputfileAngularRate filename outputfileAngularAcc filename inputfile filename', 'display_text': 'Read ESA XML GOCE Data.'},
'Grace2PotentialCoefficients': { 'name': 'Grace2PotentialCoefficients', 'key': 'Grace2PotentialCoefficients', 'description': 'This program converts potential coefficients from the GRACE SDS format into potential coefficients file . The program supports file formats for RL04 to RL06. Within the program, the variables epochStart , epochEnd and epochMid are populated with the corresponding time-stamps in the file. These can be used in to This program converts potential coefficients from the GRACE SDS format into to auto-generate the file name.', 'config_table': 'outputfilePotentialCoefficients filename variables: epochStart, epochEnd, epochMid inputfile filename', 'display_text': 'This program converts potential coefficients from the GRACE SDS format into potential coefficients file. The program supports file formats for RL04 to RL06.
Within the program, the variables epochStart, epochEnd and epochMid are populated with the corresponding time-stamps in the file. These can be used in outputfilePotentialCoefficients to auto-generate the file name.'},
'GraceAccelerometer2L1bAscii': { 'name': 'GraceAccelerometer2L1bAscii', 'key': 'GraceAccelerometer2L1bAscii', 'description': 'Convert GROOPS accelerometer files to the GRACE SDS L1B ASCII format.', 'config_table': 'outputfileAscii filename ASCII outputfile inputfileAccelerometer filename GROOPS acceleromter file satelliteIdentifier string satellite identifier (A or B for GRACE, C or D for GRACE-FO) globalAttributes string additional attributes as \'key: value\' pairs', 'display_text': 'Convert GROOPS accelerometer files to the GRACE SDS L1B ASCII format.'},
'GraceAod2DoodsonHarmonics': { 'name': 'GraceAod2DoodsonHarmonics', 'key': 'GraceAod2DoodsonHarmonics', 'description': 'This program converts the atmospheric and ocean tidal products (AOD1B) from the GRACE SDS format into This program converts the atmospheric and ocean tidal products (AOD1B) from the GRACE SDS format into .', 'config_table': 'outputfileDoodsonHarmonics filename inputfileTideGeneratingPotential filename to compute Xi phase correction inputfile filename', 'display_text': 'This program converts the atmospheric and ocean tidal products (AOD1B) from the GRACE SDS format into outputfileDoodsonHarmonics.'},
'GraceAod2TimeSplines': { 'name': 'GraceAod2TimeSplines', 'key': 'GraceAod2TimeSplines', 'description': 'This program converts the atmospheric and ocean de-aliasing product (AOD1B) from the GRACE SDS format into time spline files . Multiple inputfile s must be given in the correct time order. A linear method is assumed for the interpolation between the given points in time. The GRACE SDS format is described in "AOD1B Product Description Document" given at http://podaac.jpl.nasa.gov/grace/documentation.html .', 'config_table': 'outputfileDealiasing filename outputfileAtmosphere filename outputfileOcean filename outputfileBottomPressure filename outputfileMisc filename inputfile filename', 'display_text': 'This program converts the atmospheric and ocean de-aliasing product (AOD1B) from the GRACE SDS format into time spline files. Multiple inputfiles must be given in the correct time order. A linear method is assumed for the interpolation between the given points in time.
The GRACE SDS format is described in "AOD1B Product Description Document" given at http://podaac.jpl.nasa.gov/grace/documentation.html.'},
'GraceCoefficients2BlockMeanTimeSplines': { 'name': 'GraceCoefficients2BlockMeanTimeSplines', 'key': 'GraceCoefficients2BlockMeanTimeSplines', 'description': 'This program converts potential coefficients from the GRACE SDS RL06 format into This program converts potential coefficients from the GRACE SDS RL06 format into . The This program converts potential coefficients from the GRACE SDS RL06 format into contains the mid points of non-empty intervals and This program converts potential coefficients from the GRACE SDS RL06 format into contains the monthly interval boundaries from first to last solution. The output will always be monthly block means. If the SDS solutions do vary or overlap, the nearest solution in terms of reference epoch is used.', 'config_table': 'outputfileTimeSplines filename outputfileTimeSplinesCovariance filename only the variances are saved outputfileTimeSeries filename mid points of non-empty intervals outputfileTimeIntervals filename monthly interval boundaries from first to last solution inputfile filename', 'display_text': 'This program converts potential coefficients from the GRACE SDS RL06 format into outputfileTimeSplines.
The output will always be monthly block means. If the SDS solutions do vary or overlap, the nearest solution in terms of reference epoch is used.'},
'GraceL1a2Accelerometer': { 'name': 'GraceL1a2Accelerometer', 'key': 'GraceL1a2Accelerometer', 'description': 'This program converts Level-1A accelerometer data (ACC1A) to the GROOPS instrument file format. The GRACE Level-1A format is described in GRACEiolib.h given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz . The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize .', 'config_table': 'outputfileAccelerometer filename ACCELEROMETER in SRF outputfileAngularAccelerometer filename ACCELEROMETER in SRF inputfile filename ACC1A', 'display_text': 'This program converts Level-1A accelerometer data (ACC1A) to the GROOPS instrument file format. The GRACE Level-1A format is described in GRACEiolib.h given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz. The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize.'},
'GraceL1a2SatelliteTracking': { 'name': 'GraceL1a2SatelliteTracking', 'key': 'GraceL1a2SatelliteTracking', 'description': 'This program converts Level-1A satellite tracking data (KBR1A) to the GROOPS instrument file format. The GRACE Level-1A format is described in GRACEiolib.h given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz . The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize .', 'config_table': 'outputfileSatelliteTracking filename MISCVALUES(ant_id, K_phase, Ka_phase, K_SNR, Ka_SNR) inputfile filename KBR1A', 'display_text': 'This program converts Level-1A satellite tracking data (KBR1A) to the GROOPS instrument file format. The GRACE Level-1A format is described in GRACEiolib.h given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz. The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize.'},
'GraceL1a2StarCamera': { 'name': 'GraceL1a2StarCamera', 'key': 'GraceL1a2StarCamera', 'description': 'This program converts orientation data measured by the star cameras from the GRACE Level-1A format (SCA1A) to the GROOPS instrument file format. For further information see GraceL1a2Accelerometer .', 'config_table': 'outputfileStarCamera1 filename STARCAMERA1A, head 1 outputfileStarCamera2 filename STARCAMERA1A, head 2 inputfile filename SCA1A, !GRACE-FO is not working!', 'display_text': 'This program converts orientation data measured by the star cameras from the GRACE Level-1A format (SCA1A) to the GROOPS instrument file format. For further information see GraceL1a2Accelerometer.'},
'GraceL1a2Temperature': { 'name': 'GraceL1a2Temperature', 'key': 'GraceL1a2Temperature', 'description': 'This program converts Level-1A temperature measurments (HRT1B or HRT1A) to the GROOPS instrument file format. The GRACE Level-1A format is described in GRACE given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz . Multiple inputfile s must be given in the correct time order. The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize .', 'config_table': 'outputfileTemperature filename MISCVALUES inputfile filename HRT1B or HRT1A', 'display_text': 'This program converts Level-1A temperature measurments (HRT1B or HRT1A) to the GROOPS instrument file format. The GRACE Level-1A format is described in GRACE given at http://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/sw/GraceReadSW_L1_2010-03-31.tar.gz. Multiple inputfiles must be given in the correct time order. The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize.'},
'GraceL1b2AccHousekeeping': { 'name': 'GraceL1b2AccHousekeeping', 'key': 'GraceL1b2AccHousekeeping', 'description': 'This program converts ACC housekeeping data (AHK1B or AHK1A) from the GRACE SDS format into instrument file (ACCHOUSEKEEPING) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileAccHousekeeping filename ACCHOUSEKEEPING inputfile filename AHK1B or AHK1A', 'display_text': 'This program converts ACC housekeeping data (AHK1B or AHK1A) from the GRACE SDS format into instrument file (ACCHOUSEKEEPING). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2Accelerometer': { 'name': 'GraceL1b2Accelerometer', 'key': 'GraceL1b2Accelerometer', 'description': 'This program converts accelerometer data (ACC1B or ACT1B) from the GRACE SDS format into instrument file (ACCELEROMETER) . Multiple inputfile s must be given in the correct time order. The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize . The GRACE SDS format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" given at https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/Handbook_1B_v1.3.pdf .', 'config_table': 'outputfileAccelerometer filename ACCELEROMETER outputfileAngularAccelerometer filename ACCELEROMETER outputfileFlags filename MISCVALUES(qualflg, acl_res.x, acl_res.y, acl_res.z) inputfile filename ACC1B or ACT1B', 'display_text': 'This program converts accelerometer data (ACC1B or ACT1B) from the GRACE SDS format into instrument file (ACCELEROMETER).
Multiple inputfiles must be given in the correct time order. The output is one arc of satellite data which can include data gaps. To split the arc in multiple gap free arcs use InstrumentSynchronize.
The GRACE SDS format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" given at https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/Handbook_1B_v1.3.pdf.'},
'GraceL1b2ClockOffset': { 'name': 'GraceL1b2ClockOffset', 'key': 'GraceL1b2ClockOffset', 'description': 'This program converts clock data (CLK1B or LLK1B) from the GRACE SDS format into instrument file (MISCVALUE) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileClock filename MISCVALUE inputfile filename CLK1B or LLK1B', 'display_text': 'This program converts clock data (CLK1B or LLK1B) from the GRACE SDS format into instrument file (MISCVALUE). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2GnssReceiver': { 'name': 'GraceL1b2GnssReceiver', 'key': 'GraceL1b2GnssReceiver', 'description': 'This program converts GPS receiver data (phase and pseudo range) data from the GRACE SDS format (GPS1B or GPS1A) into instrument file (GNSSRECEIVER) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileGnssReceiver filename GNSSRECEIVER inputfile filename GPS1B or GPS1A', 'display_text': 'This program converts GPS receiver data (phase and pseudo range) data from the GRACE SDS format (GPS1B or GPS1A) into instrument file (GNSSRECEIVER). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2Magnetometer': { 'name': 'GraceL1b2Magnetometer', 'key': 'GraceL1b2Magnetometer', 'description': 'This program converts magnetometer data (MAG1B or MAG1A) from the GRACE SDS format into instrument file (MAGNETOMETER) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileMagnetometer filename MAGNETOMETER inputfile filename MAG1B or MAG1A', 'display_text': 'This program converts magnetometer data (MAG1B or MAG1A) from the GRACE SDS format into instrument file (MAGNETOMETER). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2Mass': { 'name': 'GraceL1b2Mass', 'key': 'GraceL1b2Mass', 'description': 'This program converts mass data (MAS1B or MAS1A) from the GRACE SDS format into instrument file (MASS) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileMass filename MASS inputfile filename MAS1B or MAS1A', 'display_text': 'This program converts mass data (MAS1B or MAS1A) from the GRACE SDS format into instrument file (MASS). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2Orbit': { 'name': 'GraceL1b2Orbit', 'key': 'GraceL1b2Orbit', 'description': 'This program converts the reduced dynamical orbit from the GRACE/GRACE-FO SDS format (GNV1B, GNI1B) into instrument file (ORBIT) . When GNV1B is used, the orbit can be rotated from the terrestrial reference frame (TRF) transformed into the celestial reference frame (CRF) by specifying This program converts the reduced dynamical orbit from the GRACE/GRACE-FO SDS format (GNV1B, GNI1B) into . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileOrbit filename earthRotation earthRotationType to rotate GNV1B into CRF inputfile filename GNV1B/GNI1B', 'display_text': 'This program converts the reduced dynamical orbit from the GRACE/GRACE-FO SDS format (GNV1B, GNI1B) into instrument file (ORBIT).
When GNV1B is used, the orbit can be rotated from the terrestrial reference frame (TRF) into the celestial reference frame (CRF) by specifying earthRotation.
For further information see GraceL1b2Accelerometer.'},
'GraceL1b2SatelliteTracking': { 'name': 'GraceL1b2SatelliteTracking', 'key': 'GraceL1b2SatelliteTracking', 'description': 'This program converts low-low satellite data measured by the K-band ranging system from the GRACE SDS format (KBR1B or LRI1B) into instrument file (SATELLITETRACKING) . The inputfile s contain also corrections to antenna offsets and the so called light time correction. The corrections can be stored in additional files in the same format as the observations. If a phase break is found an artificial gap is created. For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileSatelliteTracking filename SATELLITETRACKING outputfileAntCentr filename SATELLITETRACKING outputfileLighttime filename SATELLITETRACKING outputfileSNR filename MISCVALUES(K_A_SNR, Ka_A_SNR, K_B_SNR, Ka_B_SNR, qualflg) outputfileIonoCorr filename MISCVALUE inputfile filename KBR1B or LRI1B', 'display_text': 'This program converts low-low satellite data measured by the K-band ranging system from the GRACE SDS format (KBR1B or LRI1B) into instrument file (SATELLITETRACKING). The inputfiles contain also corrections to antenna offsets and the so called light time correction. The corrections can be stored in additional files in the same format as the observations. If a phase break is found an artificial gap is created. For further information see GraceL1b2Accelerometer.'},
'GraceL1b2StarCamera': { 'name': 'GraceL1b2StarCamera', 'key': 'GraceL1b2StarCamera', 'description': 'This program converts orientation data measured by a star camera (SRF to CRF) from the GRACE SDS format (SCA1B) into instrument file (STARCAMERA) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileStarCamera filename outputfileStarCameraFlags filename MISCVALUES(sca_id, qual_rss, qualflg) inputfile filename SCA1B', 'display_text': 'This program converts orientation data measured by a star camera (SRF to CRF) from the GRACE SDS format (SCA1B) into instrument file (STARCAMERA). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2StarCameraCovariance': { 'name': 'GraceL1b2StarCameraCovariance', 'key': 'GraceL1b2StarCameraCovariance', 'description': 'This program computes star camera covariance matrices ( instrument file, COVARIANCE3D ) for a GRACE satellite under consideration of the active camera heads and an a priori variance factor.', 'config_table': 'outputfileStarCameraCovariance filename inputfileStarCameraFlags filename inputfileSequenceOfEventsQSA filename sigma0 double [seconds of arc]', 'display_text': 'This program computes star camera covariance matrices (instrument file, COVARIANCE3D) for a GRACE satellite under consideration of the active camera heads and an a priori variance factor.'},
'GraceL1b2SteeringMirror': { 'name': 'GraceL1b2SteeringMirror', 'key': 'GraceL1b2SteeringMirror', 'description': 'This program converts GRACE-FO Steering Mirror output (LSM1B) to an instrument file (STARCAMERA) .', 'config_table': 'outputfileStarCamera filename inputfile filename LSM1B', 'display_text': 'This program converts GRACE-FO Steering Mirror output (LSM1B) to an instrument file (STARCAMERA).'},
'GraceL1b2Thruster': { 'name': 'GraceL1b2Thruster', 'key': 'GraceL1b2Thruster', 'description': 'This program converts thruster data (THR1B or THR1A) from the GRACE SDS format into instrument file (THRUSTER) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileThruster filename THRUSTER inputfile filename THR1B or THR1A', 'display_text': 'This program converts thruster data (THR1B or THR1A) from the GRACE SDS format into instrument file (THRUSTER). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2TimeOffset': { 'name': 'GraceL1b2TimeOffset', 'key': 'GraceL1b2TimeOffset', 'description': 'This program converts time data (TIM1A or TIM1B) from the GRACE SDS format into instrument file (MISCVALUE) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileTime filename MISCVALUE fractionalScale double 1e-6 for GRACE, 1e-9 for GRACE-FO inputfile filename TIM1A or TIM1B', 'display_text': 'This program converts time data (TIM1A or TIM1B) from the GRACE SDS format into instrument file (MISCVALUE). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2Uso': { 'name': 'GraceL1b2Uso', 'key': 'GraceL1b2Uso', 'description': 'This program converts clock data (USO1B) from the GRACE SDS format into instrument file (MISCVALUES) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileUso filename MISCVALUES(uso_freq, K_freq, Ka_freq) inputfile filename USO1B', 'display_text': 'This program converts clock data (USO1B) from the GRACE SDS format into instrument file (MISCVALUES). For further information see GraceL1b2Accelerometer.'},
'GraceL1b2Vector': { 'name': 'GraceL1b2Vector', 'key': 'GraceL1b2Vector', 'description': 'This program reads vector orientation data (positions of instruments in the satellite frame) from the GRACE SDS format (VGB1B, VGN1B, VGO1B, VKB1B, or VCM1B). The This program reads vector orientation data (positions of instruments in the satellite frame) from the GRACE SDS format (VGB1B, VGN1B, VGO1B, VKB1B, or VCM1B). The is a matrix containing for each record. The GRACE SDS format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" given at http://podaac.jpl.nasa.gov/grace/documentation.html .', 'config_table': 'outputfileVector filename inputfile filename VGB1B, VGN1B, VGO1B, VKB1B, or VCM1B', 'display_text': 'This program reads vector orientation data (positions of instruments in the satellite frame) from the GRACE SDS format (VGB1B, VGN1B, VGO1B, VKB1B, or VCM1B). The outputfileVector is a $(3n\\times1)$ matrix containing $(x,y,z)$ for each record. The GRACE SDS format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" given at http://podaac.jpl.nasa.gov/grace/documentation.html.'},
'GraceSequenceOfEvents': { 'name': 'GraceSequenceOfEvents', 'key': 'GraceSequenceOfEvents', 'description': 'This program converts the GRACE SOE (sequence of events) file/format into instrument file (MISCVALUES) . The GRACE SOE format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" and "TN-03 SOE format.txt" given at http://podaac.jpl.nasa.gov/grace/documentation.html . The output is one arc of satellite data which can include data gaps.', 'config_table': 'outputfileGraceA filename outputfileGraceB filename inputfile filename SoE file events choice ACCT sequence DSHL HeaterDisconnect mode choice Heater DSHL HeaterDisconnect SetPoint temperature set point AOCS sequence coarse pointing mode or attitude hold mode mode choice CPM coarse pointing mode AHM attitude hold mode SM science mode ACCR ACCR CMCAL sequence CoM calibration maneuver sampling double [seconds] create events between start and end of maneuver KBRCAL sequence KBR calibration maneuver sampling double [seconds] create events between start and end of maneuver VCM CoM coordinates in SRF (m) VKB KBR phase center coordinates in SRF (m) ICUVP ICUVP IPU IPU IPUR IPUR KAMI KAMI: time tag offset to Ka-phase meas. KMI K_MI: time tag offset to K-phase meas. KTOFF KTOFF: time tag offset to KBR meas. MANV MANV MTE1 MTE1 MTE2 MTE2 OCC OCC QSA SCA to SRF frame rotation QKS SCA to KBR frame rotation', 'display_text': 'This program converts the GRACE SOE (sequence of events) file/format into instrument file (MISCVALUES). The GRACE SOE format is described in "GRACE Level 1B Data Product User Handbook JPL D-22027" and "TN-03_SOE_format.txt" given at http://podaac.jpl.nasa.gov/grace/documentation.html. The output is one arc of satellite data which can include data gaps.'},
'GrailCdr2Orbit': { 'name': 'GrailCdr2Orbit', 'key': 'GrailCdr2Orbit', 'description': 'This program converts the orbit from the GRAIL SDS format into instrument file (ORBIT) .', 'config_table': 'outputfileOrbit filename inputfile filename', 'display_text': 'This program converts the orbit from the GRAIL SDS format into instrument file (ORBIT).'},
'GrailCdr2SatelliteTracking': { 'name': 'GrailCdr2SatelliteTracking', 'key': 'GrailCdr2SatelliteTracking', 'description': 'This program converts low-low satellite data measured by the K-band ranging system from the GRAIL format into instrument file (SATELLITETRACKING) . The inputfile s contain also corrections for antenna offsets and the so called light time correction. The corrections can be stored in additional files in the same format as the observations. If a phase break is found an artificial gap is created.', 'config_table': 'outputfileSatelliteTracking filename outputfileAntCentr filename outputfileLighttime filename outputfileTemperature filename approximateTimeBias double [seconds] inputfile filename', 'display_text': 'This program converts low-low satellite data measured by the K-band ranging system from the GRAIL format into instrument file (SATELLITETRACKING). The inputfiles contain also corrections for antenna offsets and the so called light time correction. The corrections can be stored in additional files in the same format as the observations. If a phase break is found an artificial gap is created.'},
'GrailCdr2StarCamera': { 'name': 'GrailCdr2StarCamera', 'key': 'GrailCdr2StarCamera', 'description': 'This program converts orientation data measured by a star camera (SRF to CRF) from the GRAIL SDS format into instrument file (STARCAMERA) . For further information see GraceL1b2Accelerometer .', 'config_table': 'outputfileStarCamera filename inputfile filename', 'display_text': 'This program converts orientation data measured by a star camera (SRF to CRF) from the GRAIL SDS format into instrument file (STARCAMERA). For further information see GraceL1b2Accelerometer.'},
'GriddedData2NetCdf': { 'name': 'GriddedData2NetCdf', 'key': 'GriddedData2NetCdf', 'description': 'This program converts a This program converts a to a COARDS compliant NetCDF file. The output data can be defined with dataVariable . You should add at least the attributes units , long_name , and maybe _FillValue to the variables. For the dataVariable:value the standard dataVariables are available to select the data columns of This program converts a . See also NetCdfInfo , GriddedDataTimeSeries2NetCdf , NetCdf2GriddedData .', 'config_table': 'outputfileNetCdf filename file name of NetCDF output inputfileGriddedData filename input grid sequence dataVariable sequence metadata for data variables name string netCDF variable name value expression expression (variables \'height\', \'data\', \'longitude\', \'latitude\' and, \'area\' are taken from the gridded data dataType choice double float int attribute choice netCDF attributes text sequence name string value string value sequence name string value double dataType choice double float int globalAttribute choice additional meta data text sequence name string value string value sequence name string value double dataType choice double float int', 'display_text': 'This program converts a inputfileGriddedData to a COARDS compliant NetCDF file. The output data can be defined with dataVariable. You should add at least the attributes units, long_name, and maybe _FillValue to the variables. For the dataVariable:value the standard dataVariables are available to select the data columns of inputfileGriddedData.
See also NetCdfInfo, GriddedDataTimeSeries2NetCdf, NetCdf2GriddedData.'},
'GriddedDataTimeSeries2NetCdf': { 'name': 'GriddedDataTimeSeries2NetCdf', 'key': 'GriddedDataTimeSeries2NetCdf', 'description': 'Read a Read a and converts it to a COARDS compliant NetCDF file. The output data can be defined with dataVariable . You should add at least the attributes units , long_name , and maybe _FillValue to the variables. The dataVariable:inputColumn selects the data from the input file. If Read a is not set the temporal nodal points from the inputfile are used. See also NetCdfInfo , GriddedData2NetCdf , NetCdf2GriddedDataTimeSeries .', 'config_table': 'outputfileNetCdf filename file name of NetCDF output inputfileGriddedDataTimeSeries filename timeSeries timeSeriesType otherwise times from inputfile are used dataVariable sequence metadata for data variables name string netCDF variable name inputColumn uint input data column dataType choice double float int attribute choice netCDF attributes text sequence name string value string value sequence name string value double dataType choice double float int globalAttribute choice additional meta data text sequence name string value string value sequence name string value double dataType choice double float int', 'display_text': 'Read a inputfileGriddedDataTimeSeries and converts it to a COARDS compliant NetCDF file.
The output data can be defined with dataVariable. You should add at least the attributes units, long_name, and maybe _FillValue to the variables. The dataVariable:inputColumn selects the data from the input file.
If timeSeries is not set the temporal nodal points from the inputfile are used.
See also NetCdfInfo, GriddedData2NetCdf, NetCdf2GriddedDataTimeSeries.'},
'GroopsAscii2Orbit': { 'name': 'GroopsAscii2Orbit', 'key': 'GroopsAscii2Orbit', 'description': 'Read Orbits given in groops kinematic orbit ASCII format with covariance information. See also Orbit2GroopsAscii .', 'config_table': 'outputfileOrbit filename outputfileCovariance filename earthRotation earthRotationType inputfile filename', 'display_text': 'Read Orbits given in groops kinematic orbit ASCII format with covariance information.
See also Orbit2GroopsAscii.'},
'Hw2TideGeneratingPotential': { 'name': 'Hw2TideGeneratingPotential', 'key': 'Hw2TideGeneratingPotential', 'description': 'Write tide generating potential from Hartmann and Wenzel 1995 file, https://doi.org/10.1029/95GL03324 .', 'config_table': 'outputfileTideGeneratingPotential filename inputfile filename headerLines uint skip number of header lines referenceTime time reference time', 'display_text': 'Write tide generating potential from Hartmann and Wenzel 1995 file, https://doi.org/10.1029/95GL03324.'},
'Icgem2PotentialCoefficients': { 'name': 'Icgem2PotentialCoefficients', 'key': 'Icgem2PotentialCoefficients', 'description': 'Read spherical harmonics in ICGEM format ( http://icgem.gfz-potsdam.de/ ).', 'config_table': 'outputfileStaticCoefficients filename static potential coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid; (icgem1.0) epochReference outputfileTrendCoefficients filename trend potential coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid; (icgem1.0) epochReference outputfileOscillationCosine filename oscillation cosine coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid, oscillationPeriod; (icgem1.0) epochReference, oscillationPeriod outputfileOscillationSine filename oscillation sine coefficients in GROOPS gfc format. Available variables (icgem2.0): epochStart, epochEnd, epochMid, oscillationPeriod; (icgem1.0) epochReference, oscillationPeriod outputfileIntervals filename two column ASCII file with all intervals found (only sensible for icgem2.0). The base name will be extended with .static, .trend, .annualCos, and .annualSin. inputfileIcgem filename ICGEM GFC file useFormalErrors boolean use formal errors if both formal and calibrated errors are given', 'display_text': 'Read spherical harmonics in ICGEM format (http://icgem.gfz-potsdam.de/).'},
'Iers2OceanPoleTide': { 'name': 'Iers2OceanPoleTide', 'key': 'Iers2OceanPoleTide', 'description': 'Read ocean pole tide model according to IERS conventions and convert into oceanPoleTide file .', 'config_table': 'outputfileOceanPole filename inputfile filename inputfileLoadingLoveNumber filename maxDegree uint GM double Geocentric gravitational constant R double Reference radius Omega double [rad/s] earth rotation rho double [kg/m**3] density of sea water G double [m**3/(kg*s**2)] gravitational constant g double [m/s**2] gravity', 'display_text': 'Read ocean pole tide model according to IERS conventions and convert into oceanPoleTide file.'},
'IersC04IAU2000EarthOrientationParameter': { 'name': 'IersC04IAU2000EarthOrientationParameter', 'key': 'IersC04IAU2000EarthOrientationParameter', 'description': 'Read a IERS Earth orientation data C04 (IAU2000A) file and write it as Read a IERS Earth orientation data C04 (IAU2000A) file and write it as .', 'config_table': 'outputfileEOP filename inputfile filename timeStart time timeEnd time', 'display_text': 'Read a IERS Earth orientation data C04 (IAU2000A) file and write it as outputfileEOP.'},
'IersHighFrequentEop2DoodsonEop': { 'name': 'IersHighFrequentEop2DoodsonEop', 'key': 'IersHighFrequentEop2DoodsonEop', 'description': 'Read Diurnal and Subdiurnal Earth Orientation variations according to updated IERS 2010 conventions and write them as Read Diurnal and Subdiurnal Earth Orientation variations according to updated IERS 2010 conventions and write them as .', 'config_table': 'outputfileDoodsonEOP filename inputfile filename', 'display_text': 'Read Diurnal and Subdiurnal Earth Orientation variations according to updated IERS 2010 conventions and write them as outputfileDoodsonEOP.'},
'IersPotential2DoodsonHarmonics': { 'name': 'IersPotential2DoodsonHarmonics', 'key': 'IersPotential2DoodsonHarmonics', 'description': 'Read ocean tide file in IERS format.', 'config_table': 'outputfileDoodsonHarmoncis filename inputfile filename headerLines uint skip number of header lines minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'Read ocean tide file in IERS format.'},
'IersRapidIAU2000EarthOrientationParameter': { 'name': 'IersRapidIAU2000EarthOrientationParameter', 'key': 'IersRapidIAU2000EarthOrientationParameter', 'description': 'Read a IERS Earth orientation rapid data and prediction file (IAU2000) and write it as Read a IERS Earth orientation rapid data and prediction file (IAU2000) and write it as .', 'config_table': 'outputfileEOP filename inputfile filename timeStart time timeEnd time', 'display_text': 'Read a IERS Earth orientation rapid data and prediction file (IAU2000) and write it as outputfileEOP.'},
'IersWaterHeight2DoodsonHarmonics': { 'name': 'IersWaterHeight2DoodsonHarmonics', 'key': 'IersWaterHeight2DoodsonHarmonics', 'description': 'Read ocean tide file in IERS format.', 'config_table': 'outputfileDoodsonHarmoncis filename inputfile filename headerLines uint skip number of header lines inputfileTideGeneratingPotential filename to compute Xi phase correction kernel kernelType data type of input values factor double to convert in SI units minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius', 'display_text': 'Read ocean tide file in IERS format.'},
'Igs2EarthOrientationParameter': { 'name': 'Igs2EarthOrientationParameter', 'key': 'Igs2EarthOrientationParameter', 'description': 'Read Rapid Earth Orientation Parameter from IGS daily file and write it as Read Rapid Earth Orientation Parameter from IGS daily file and write it as .', 'config_table': 'outputfileEOP filename inputfile filename timeStart time timeEnd time', 'display_text': 'Read Rapid Earth Orientation Parameter from IGS daily file and write it as outputfileEOP.'},
'Jason2Starcamera': { 'name': 'Jason2Starcamera', 'key': 'Jason2Starcamera', 'description': 'This program reads in Jason star camera data given in a special format. Files available at: cddis.gsfc.nasa.gov/pub/doris/ancillary/quaternions/ja2/ . A description of the format can be found under: ftp://ftp.ids-doris.org/pub/ids/ancillary/quaternions/jason1_2_quaternion_solar_panel.pdf', 'config_table': 'outputfileStarCamera filename jasonNumber uint Jason number (different file format), 1 for Sentinel inputfile filename', 'display_text': 'This program reads in Jason star camera data given in a special format. Files available at: cddis.gsfc.nasa.gov/pub/doris/ancillary/quaternions/ja2/. A description of the format can be found under: ftp://ftp.ids-doris.org/pub/ids/ancillary/quaternions/jason1_2_quaternion_solar_panel.pdf'},
'JplAscii2Ephemerides': { 'name': 'JplAscii2Ephemerides', 'key': 'JplAscii2Ephemerides', 'description': 'Read JPL DExxx (ASCII) ephemerides.', 'config_table': 'outputfileEphemerides filename inputfileHeader filename inputfileData filename', 'display_text': 'Read JPL DExxx (ASCII) ephemerides.'},
'Merit2FullRate': { 'name': 'Merit2FullRate', 'key': 'Merit2FullRate', 'description': 'Converts and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including full rate data like range, accuracy, wavelength, azimuth and elevation.', 'config_table': 'outputfileNormalPoints filename variable {station} available outputfileMeteorological filename variable {station} available inputfileSlrData filename SLR MERIT II file', 'display_text': 'Converts MERIT II file and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including full rate data like range, accuracy, wavelength, azimuth and elevation.'},
'Merit2NormalPoints': { 'name': 'Merit2NormalPoints', 'key': 'Merit2NormalPoints', 'description': 'Converts and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including normal point data like range, accuracy, redundancy, wavelength, window size, azimuth and elevation.', 'config_table': 'outputfileNormalPoints filename variable {station} available outputfileMeteorological filename variable {station} available inputfileSlrData filename SLR MERIT II file', 'display_text': 'Converts MERIT II file and writes an instrument file (METEOROLOGICAL) including meteorological data like temperature, air pressure and humidity as well as an instrument file (SATELLITELASERRANGING) including normal point data like range, accuracy, redundancy, wavelength, window size, azimuth and elevation.'},
'NetCdf2GriddedData': { 'name': 'NetCdf2GriddedData', 'key': 'NetCdf2GriddedData', 'description': 'This program converts a COARDS compliant NetCDF file into an This program converts a COARDS compliant NetCDF file into an . If no specific input variableNameData are selected all suitable data are used. If the NETCDF file contains a time axis ( variableNameTime ) a specific epoch can be selected with time . The nearest epoch in file is used. See also NetCdfInfo , GriddedData2NetCdf , NetCdf2GriddedDataTimeSeries .', 'config_table': 'outputfileGriddedData filename inputfileNetCdf filename variableNameLongitude string name of NetCDF variable variableNameLatitude string name of NetCDF variable variableNameTime string if with time axis: name of NetCDF variable variableNameData string data variables, otherwise all suitable data are used time time if with time axis: nearest epoch is used R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'This program converts a COARDS compliant NetCDF file into an outputfileGriddedData. If no specific input variableNameData are selected all suitable data are used.
If the NETCDF file contains a time axis (variableNameTime) a specific epoch can be selected with time. The nearest epoch in file is used.
See also NetCdfInfo, GriddedData2NetCdf, NetCdf2GriddedDataTimeSeries.'},
'NetCdf2GriddedDataTimeSeries': { 'name': 'NetCdf2GriddedDataTimeSeries', 'key': 'NetCdf2GriddedDataTimeSeries', 'description': 'This program converts a COARDS compliant NetCDF file into This program converts a COARDS compliant NetCDF file into . If no specific input variableNameData are selected all suitable data are used. See also NetCdfInfo , NetCdf2GriddedData , GriddedDataTimeSeries2NetCdf .', 'config_table': 'outputfileGriddedDataTimeSeries filename inputfileNetCdf filename variableNameLongitude string name of NetCDF variable variableNameLatitude string name of NetCDF variable variableNameTime string name of NetCDF variable variableNameData string data variables, otherwise all suitable data are used R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'This program converts a COARDS compliant NetCDF file into outputfileGriddedDataTimeSeries. If no specific input variableNameData are selected all suitable data are used.
See also NetCdfInfo, NetCdf2GriddedData, GriddedDataTimeSeries2NetCdf.'},
'NetCdfInfo': { 'name': 'NetCdfInfo', 'key': 'NetCdfInfo', 'description': 'Print content information of a NetCDF file like dimensions, variables and attributes. See also NetCdf2GriddedData , NetCdf2GriddedDataTimeSeries , GriddedData2NetCdf , GriddedDataTimeSeries2NetCdf .', 'config_table': 'inputfileNetCdf filename', 'display_text': 'Print content information of a NetCDF file like dimensions, variables and attributes.
See also NetCdf2GriddedData, NetCdf2GriddedDataTimeSeries, GriddedData2NetCdf, GriddedDataTimeSeries2NetCdf.'},
'NormalsSphericalHarmonics2Sinex': { 'name': 'NormalsSphericalHarmonics2Sinex', 'key': 'NormalsSphericalHarmonics2Sinex', 'description': 'Write potential coefficients and normal equations to . See also Sinex2Normals and GnssNormals2Sinex .', 'config_table': 'outputfileSinex filename solutions in SINEX format inputfileNormals filename normal equation matrix inputfileSolution filename parameter vector inputfileSigmax filename standard deviations of the parameters (sqrt of the diagonal of the inverse normal equation) inputfileApriori filename apriori parameter vector inputfileAprioriMatrix filename normal equation matrix of applied constraints time time reference time for parameters sinexHeader sequence agencyCode string identify the agency providing the data timeStart time start time of the data timeEnd time end time of the data observationCode string technique used to generate the SINEX solution constraintCode string 0: tight constraint, 1: siginficant constraint, 2: unconstrained solutionContent string solution types contained in the SINEX solution (S O E T C A) description string organizitions gathering/alerting the file contents contact string Address of the relevant contact. e-mail output string Description of the file contents input string Brief description of the input used to generate this solution software string Software used to generate the file hardware string Computer hardware on which above software was run inputfileComment filename comments in the comment block from a file (truncated at 80 characters) comment string comments in the comment block', 'display_text': 'Write potential coefficients and normal equations to SINEX format.
See also Sinex2Normals and GnssNormals2Sinex.'},
'OceanTidesDTU2GriddedData': { 'name': 'OceanTidesDTU2GriddedData', 'key': 'OceanTidesDTU2GriddedData', 'description': 'Convert DTU ocean tide grids to griddedData (amplitude, phase).', 'config_table': 'outputfileGriddedData filename data0=amplitude, data1=phase inputfile filename R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'Convert DTU ocean tide grids to griddedData (amplitude, phase).'},
'Orbit2Cpf': { 'name': 'Orbit2Cpf', 'key': 'Orbit2Cpf', 'description': 'Writes groops orbits to . The coordinate system used in the CPF format is usually presented in ITRF. The required time format for the input orbit file is GPS. The time format of the output CPF file is given in UTC. See also Cpf2Orbit .', 'config_table': 'outputfile filename inputfileOrbit filename inputfileSatelliteInfo filename Platform File earthRotation earthRotationType versionNumber uint Version number of production day with zero leading fill, e.g. 01 targetClass uint set 1 for passive retroreflector, set 0 for no retroreflector (includes debris)', 'display_text': 'Writes groops orbits to CPF file.
The coordinate system used in the CPF format is usually presented in ITRF. The required time format for the input orbit file is GPS. The time format of the output CPF file is given in UTC.
See also Cpf2Orbit.'},
'Orbit2GraceL1b': { 'name': 'Orbit2GraceL1b', 'key': 'Orbit2GraceL1b', 'description': 'This program converts an instrument file (ORBIT) specified in the celestial reference frame (CRF) to the GRACE/GRACE-FO SDS format (GNV1B, GNI1B). If This program converts an is provided, the orbit is rotated into the terrestrial reference frame as required for the GNV1B product; otherwise, a GNI1B product is written. The text file inputfileHeader is placed at the beginning of the outputfile . The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin} , {epochmax} , and {epochcount} . See also GraceL1b2Orbit .', 'config_table': 'outputfile filename GNV1B/GNI1B inputfileHeader filename YAML Header, {epochmin}, {epochmax}, {epochcount} available inputfileOrbit filename satelliteId string A, B, C or D earthRotation earthRotationType rotation into Earth fixed frame', 'display_text': 'This program converts an instrument file (ORBIT) specified in the celestial reference frame (CRF) to the GRACE/GRACE-FO SDS format (GNV1B, GNI1B). If earthRotation is provided, the orbit is rotated into the terrestrial reference frame as required for the GNV1B product; otherwise, a GNI1B product is written.
The text file inputfileHeader is placed at the beginning of the outputfile. The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin}, {epochmax}, and {epochcount}.
See also GraceL1b2Orbit.'},
'Orbit2GroopsAscii': { 'name': 'Orbit2GroopsAscii', 'key': 'Orbit2GroopsAscii', 'description': 'Convert groops orbits and corresponding covariance information to ASCII format. The format is used to publish TUG orbits. It contains a two line header with a short description of the orbit defined in firstLine . The orbit is rotated to the Earth fixed frame (TRF) with Convert groops orbits and corresponding covariance information to ASCII format. The format is used to publish TUG orbits. It contains a two line header with a short description of the orbit defined in and given as one line per epoch. The epoch lines contain time [MJD GPS time], position x, y and z [m], and the epoch covariance xx, yy, zz, xy, xz and yz [ ]. See also GroopsAscii2Orbit .', 'config_table': 'outputfile filename inputfileOrbit filename inputfileCovariance filename earthRotation earthRotationType firstLine string Text for first line', 'display_text': 'Convert groops orbits and corresponding covariance information to ASCII format. The format is used to publish TUG orbits. It contains a two line header with a short description of the orbit defined in firstLine. The orbit is rotated to the Earth fixed frame (TRF) with earthRotation and given as one line per epoch. The epoch lines contain time [MJD GPS time], position x, y and z [m], and the epoch covariance xx, yy, zz, xy, xz and yz [$m^2$].
See also GroopsAscii2Orbit.'},
'Orbit2Sp3Format': { 'name': 'Orbit2Sp3Format', 'key': 'Orbit2Sp3Format', 'description': 'Writes orbits to . SP3 orbits are usually given in the terrestrial reference frame (TRF), so providing Writes orbits to automatically rotates the orbits from the celestial reference frame (CRF) to the TRF. Since SP3 orbits often use the center of Earth as a reference, a correction from center of mass to center of Earth can be applied to the orbits by providing Writes orbits to (e.g. ocean tides). See also Sp3Format2Orbit .', 'config_table': 'outputfile filename satellite sequence inputfileOrbit filename inputfileClock filename inputfileCovariance filename identifier string 3 characters (e.g. GNSS PRN: G01) orbitAccuracy double [m] used for accuracy codes in header (0 = unknown) earthRotation earthRotationType rotate data into Earth-fixed frame gravityfield gravityfieldType degree 1 fluid mantle for CM2CE correction (SP3 orbits should be in center of Earth) comment string comment lines (77 char max) firstLine string Text for first line e.g: u+U IGb14 KIN ITSG writeVelocity boolean write velocity in addition to position useSp3kFormat boolean use the extended sp3k format', 'display_text': 'Writes orbits to SP3 format.
SP3 orbits are usually given in the terrestrial reference frame (TRF), so providing earthRotation automatically rotates the orbits from the celestial reference frame (CRF) to the TRF. Since SP3 orbits often use the center of Earth as a reference, a correction from center of mass to center of Earth can be applied to the orbits by providing gravityfield (e.g. ocean tides).
See also Sp3Format2Orbit.'},
'PotentialCoefficients2Icgem': { 'name': 'PotentialCoefficients2Icgem', 'key': 'PotentialCoefficients2Icgem', 'description': 'Write spherical harmonics in ICGEM format. GROOPS uses this format as default but this program enables the possibility to include comments and set the modelname.', 'config_table': 'outputfile filename inputfilePotentialCoefficients filename inputfileTrend filename oscillation sequence inputfileCosPotentialCoefficients filename inputfileSinPotentialCoefficients filename period string period of oscillation [year] comment string comment in header inputfileComment filename file containing comments for header modelname string name of the model tideSystem choice tide system of model zero_tide tide_free minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius time time reference time', 'display_text': 'Write spherical harmonics in ICGEM format. GROOPS uses this format as default but this program enables the possibility to include comments and set the modelname.'},
'PsmslOceanBottomPressure2TimeSeries': { 'name': 'PsmslOceanBottomPressure2TimeSeries', 'key': 'PsmslOceanBottomPressure2TimeSeries', 'description': 'This program reads ocean bottom pressure time series from the Permanent Service for Mean Sea Level (PSMSL). In addition to the OBP measurements, the recorder position can be written to a grid file .', 'config_table': 'outputfileTimeSeries filename outputfilePosition filename recorder position as gridded data inputfile filename isDaily boolean ignoreBadData boolean R double inverseFlattening double timeSeries timeSeriesType', 'display_text': 'This program reads ocean bottom pressure time series from the Permanent Service for Mean Sea Level (PSMSL).
In addition to the OBP measurements, the recorder position can be written to a grid file.'},
'RinexObservation2GnssReceiver': { 'name': 'RinexObservation2GnssReceiver', 'key': 'RinexObservation2GnssReceiver', 'description': 'Converts (version 2, 3, and 4) and observation files to Converts . In case of observation files containing GLONASS satellites, a mapping from PRN to frequency number must be provided via Converts , see SinexMetadata2GlonassFrequencyNumber . RINEX v3+ observation files already contain this information. Converts and Converts can be used to filter the observation types that will be exported. If Converts is set, RINEX antenna and receiver info will be cross-checked with the provided file and warnings are raised in case of differences. A list of semi-codeless GPS receivers (observing C2D instead of C2W) can be provided via Converts with one receiver name per line. Observation types will be automatically corrected for these receivers. Some LEO satellites use special RINEX observation types, either from the unofficial RINEX v2.20 or custom ones. These can be provided via Converts . 
The file must must contain a table with two columns, the first being the special type, and the second being the equivalent RINEX v3 type.', 'config_table': 'outputfileGnssReceiver filename inputfileRinexObservation filename RINEX or Compact RINEX observation files inputfileMatrixPrn2FrequencyNumber filename (required for RINEX v2 files containing GLONASS observations), matrix with columns: GLONASS PRN, SVN, mjdStart, mjdEnd, frequencyNumber inputfileStationInfo filename used to determine semi-codeless receivers and to cross-check antenna and receiver info inputfileSemiCodelessReceivers filename list with one receiver name per line inputfileSpecialObservationTypes filename table mapping special observation types to RINEX 3 types, e.g.: LA L1C useType gnssType only use observations that match any of these patterns ignoreType gnssType ignore observations that match any of these patterns', 'display_text': 'Converts RINEX (version 2, 3, and 4) and Compact RINEX observation files to outputfileGnssReceiver.
useType and ignoreType can be used to filter the observation types that will be exported.
If inputfileStationInfo is set, RINEX antenna and receiver info will be cross-checked with the provided file and warnings are raised in case of differences.
A list of semi-codeless GPS receivers (observing C2D instead of C2W) can be provided via inputfileSemiCodelessReceivers with one receiver name per line. Observation types will be automatically corrected for these receivers.
Some LEO satellites use special RINEX observation types, either from the unofficial RINEX v2.20 or custom ones. These can be provided via inputfileSpecialObservationTypes. The file must contain a table with two columns, the first being the special type, and the second being the equivalent RINEX v3 type.
'},
'Sacc2Orbit': { 'name': 'Sacc2Orbit', 'key': 'Sacc2Orbit', 'description': 'This program reads in SACC orbit data.', 'config_table': 'outputfileOrbit filename inputfile filename', 'display_text': 'This program reads in SACC orbit data.'},
'SatelliteTracking2GraceL1b': { 'name': 'SatelliteTracking2GraceL1b', 'key': 'SatelliteTracking2GraceL1b', 'description': 'This program converts low-low satellite tracking data (KBR or LRI) from the GROOPS format instrument file (SATELLITETRACKING) to the GRACE SDS format (KBR1B or LRI1B). It reads the satellite tracking data and optionally corrections (antenna offsets and light time corrections) and flags into one outputfile . The text file inputfileHeader is placed at the beginning of the outputfile . The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin} , {epochmax} , and {epochcount} . See also GraceL1b2SatelliteTracking .', 'config_table': 'outputfile filename KBR1B or LRI1B inputfileHeader filename YAML Header, {epochmin}, {epochmax}, {epochcount} available inputfileSatelliteTracking filename SATELLITETRACKING inputfileIonoCorr filename MISCVALUE inputfileLighttime filename SATELLITETRACKING inputfileAntCentr filename SATELLITETRACKING inputfileSNR filename MISCVALUES(K_A_SNR, Ka_A_SNR, K_B_SNR, Ka_B_SNR, qualflg)', 'display_text': 'This program converts low-low satellite tracking data (KBR or LRI) from the GROOPS format instrument file (SATELLITETRACKING) to the GRACE SDS format (KBR1B or LRI1B). It reads the satellite tracking data and optionally corrections (antenna offsets and light time corrections) and flags into one outputfile.
The text file inputfileHeader is placed at the beginning of the outputfile. The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin}, {epochmax}, and {epochcount}.
See also GraceL1b2SatelliteTracking.'},
'Sentinel2StarCamera': { 'name': 'Sentinel2StarCamera', 'key': 'Sentinel2StarCamera', 'description': 'This program reads in Sentinel-1/2/3 star camera data given in the special format.', 'config_table': 'outputfileStarCamera filename inputfile filename', 'display_text': 'This program reads in Sentinel-1/2/3 star camera data given in the special format.'},
'SentinelXml2Orbit': { 'name': 'SentinelXml2Orbit', 'key': 'SentinelXml2Orbit', 'description': 'Read Sentinel orbits from XML format.', 'config_table': 'outputfileOrbit filename earthRotation earthRotationType inputfile filename', 'display_text': 'Read Sentinel orbits from XML format.'},
'Sinex2Normals': { 'name': 'Sinex2Normals', 'key': 'Sinex2Normals', 'description': 'Convert normal equations from to normal equations . See also GnssNormals2Sinex and NormalsSphericalHarmonics2Sinex .', 'config_table': 'outputfileNormals filename N, n: unconstrained normal equations outputfileNormalsConstraint filename N0, n0: normal equations of applied constraints outputfileSolution filename x: parameter vector outputfileSolutionApriori filename x0: a priori parameter vector inputFileSinex filename', 'display_text': 'Convert normal equations from SINEX format to normal equations.
See also GnssNormals2Sinex and NormalsSphericalHarmonics2Sinex.'},
'Sinex2StationDiscontinuities': { 'name': 'Sinex2StationDiscontinuities', 'key': 'Sinex2StationDiscontinuities', 'description': 'Convert station discontinuities from (e.g. ITRF20) to Convert station discontinuities from (MISCVALUE). A value of 1 means position discontinuity, a value of 2 means velocity discontinuity. Start and end epochs with value 0 are added in addition to the discontinuities from SINEX to define continuity interval borders. See also Sinex2StationPosition and Sinex2StationPostSeismicDeformation .', 'config_table': 'outputfileInstrument filename loop variable is replaced with station name (e.g. wtzz) inputfileDiscontinuities filename SINEX (e.g. ITRF20) station discontinuities variableLoopStation string variable name for station loop stationName string only export these stations', 'display_text': 'Convert station discontinuities from SINEX format (e.g. ITRF20) to outputfileInstrument (MISCVALUE). A value of 1 means position discontinuity, a value of 2 means velocity discontinuity. Start and end epochs with value 0 are added in addition to the discontinuities from SINEX to define continuity interval borders.
See also Sinex2StationPosition and Sinex2StationPostSeismicDeformation.'},
'Sinex2StationPositions': { 'name': 'Sinex2StationPositions', 'key': 'Sinex2StationPositions', 'description': 'Extracts station positions from inputfileSinexSolution ( ) and writes an Extracts station positions from of type VECTOR3D for each station. Positions will be computed at Extracts station positions from based on position and velocity of each provided interval in the SINEX file. With inputfileSinexDiscontinuities the bounds of these time spans are adjusted to the exact epochs of discontinuities. The inputfileSinexPostSeismicDeformations adds the ITRF post-seismic deformation model to the affected stations. The inputfileSinexFrequencies adds annual and semi-annual frequencies. If extrapolateBackward or extrapolateForward are provided, positions will also be computed for epochs before the first interval/after the last interval, based on the position and velocity of the first/last interval. Position extrapolation will stop at the first discontinuity before the first interval/after the last interval. Stations can be limited via stationName , otherwise all stations in inputfileSinexSolution will be used.', 'config_table': 'outputfileInstrument filename loop variable is replaced with station name (e.g. 
wtzz) variableLoopStation string variable name for station loop inputfileSinexSolution filename SINEX file inputfileSinexDiscontinuities filename SINEX file inputfileSinexPostSeismicDeformations filename SINEX file inputfileSinexFrequencies filename SINEX file (XYZ or ENU) timeSeries timeSeriesType compute positions for these epochs based on velocity extrapolateForward boolean also compute positions for epochs after last interval defined in SINEX file extrapolateBackward boolean also compute positions for epochs before first interval defined in SINEX file stationName string convert only these stations', 'display_text': 'Extracts station positions from inputfileSinexSolution (SINEX format description) and writes an outputfileInstrument of type VECTOR3D for each station. Positions will be computed at timeSeries based on position and velocity of each provided interval in the SINEX file. With inputfileSinexDiscontinuities the bounds of these time spans are adjusted to the exact epochs of discontinuities. The inputfileSinexPostSeismicDeformations adds the ITRF post-seismic deformation model to the affected stations. The inputfileSinexFrequencies adds annual and semi-annual frequencies.
If extrapolateBackward or extrapolateForward are provided, positions will also be computed for epochs before the first interval/after the last interval, based on the position and velocity of the first/last interval. Position extrapolation will stop at the first discontinuity before the first interval/after the last interval.
Stations can be limited via stationName, otherwise all stations in inputfileSinexSolution will be used.'},
'SinexEccentricties2SlrPlatform': { 'name': 'SinexEccentricties2SlrPlatform', 'key': 'SinexEccentricties2SlrPlatform', 'description': 'Reads metadata like station name, station number, approximate station position and station eccentricities from (une version) and write them to the Reads metadata like station name, station number, approximate station position and station eccentricities from for each station.', 'config_table': 'outputfileStationInfo filename loop variable is replaced with station name variableLoopStation string variable name for station loop inputfileSinex filename SINEX file (.snx or .ssc) stationName string convert only these stations', 'display_text': 'Reads metadata like station name, station number, approximate station position and station eccentricities from Station Eccentricities Sinex File (une version) and write them to the outputfileStationInfo for each station.'},
'SinexMetadata2GlonassFrequencyNumber': { 'name': 'SinexMetadata2GlonassFrequencyNumber', 'key': 'SinexMetadata2GlonassFrequencyNumber', 'description': 'Create Create matrix from with the columns: GLONASS PRN, SVN, mjdStart, mjdEnd, frequencyNumber. See also GnssGlonassFrequencyNumberUpdate , GnssAntex2AntennaDefinition , RinexObservation2GnssReceiver .', 'config_table': 'outputfileMatrixPrn2FrequencyNumber filename GROOPS matrix with columns: GLONASS PRN, SVN, mjdStart, mjdEnd, frequencyNumber inputfileSinexMetadata filename IGS SINEX metadata file', 'display_text': 'Create outputfileMatrixPrn2FrequencyNumber matrix from IGS SINEX metadata format with the columns: GLONASS PRN, SVN, mjdStart, mjdEnd, frequencyNumber.
See also GnssGlonassFrequencyNumberUpdate, GnssAntex2AntennaDefinition, RinexObservation2GnssReceiver.'},
'SinexMetadata2SatelliteModel': { 'name': 'SinexMetadata2SatelliteModel', 'key': 'SinexMetadata2SatelliteModel', 'description': 'Create Create from . If Create is provided it is used as a basis and values are updated from the metadata file. See also SatelliteModelCreate .', 'config_table': 'outputfileSatelliteModel filename inputfileSinexMetadata filename IGS SINEX metadata file inputfileSatelliteModel filename base satellite model svn string e.g. G040, R736, E204, C211', 'display_text': 'Create outputfileSatelliteModel from IGS SINEX metadata format.
If inputfileSatelliteModel is provided it is used as a basis and values are updated from the metadata file.
See also SatelliteModelCreate.'},
'SlrComModel2RangeBiasStationSatellite': { 'name': 'SlrComModel2RangeBiasStationSatellite', 'key': 'SlrComModel2RangeBiasStationSatellite', 'description': 'Converts the tables of CoM corrections of José Rodríguez ( https://icts-yebes.oan.es/slr/com_models/models/ ) into station/satellite specific Converts the tables of CoM corrections of José Rodríguez ( . Only the deviations to the default value in Converts the tables of CoM corrections of José Rodríguez ( are written. This program must be called for every provided satellite. The range bias values can be used in Converts the tables of CoM corrections of José Rodríguez ( in SlrProcessing . Reference: Rodriguez J., Otsubo T., Appleby G. Upgraded Modelling for the Determination of Centre of Mass Corrections of Geodetic SLR Satellites: Impact on Key Parameters of the Terrestrial Reference Frame. Journal of Geodesy, 2019. doi: 10.1007/s00190-019-01315-0', 'config_table': 'outputfileRangeBias filename MISCVALUE, variable {station} available inputfileSatelliteInfo filename inputfile filename from Rodriguez model variableLoopStation string variable name for station loop stationName string convert only these stations', 'display_text': 'Converts the tables of CoM corrections of José Rodríguez (https://icts-yebes.oan.es/slr/com_models/models/) into station/satellite specific outputfileRangeBias. Only the deviations to the default value in inputfileSatelliteInfo are written. This program must be called for every provided satellite. The range bias values can be used in parametrization:rangeBiasStationSatelliteApriori in SlrProcessing.
Reference: Rodriguez J., Otsubo T., Appleby G. Upgraded Modelling for the Determination of Centre of Mass Corrections of Geodetic SLR Satellites: Impact on Key Parameters of the Terrestrial Reference Frame. Journal of Geodesy, 2019. doi: 10.1007/s00190-019-01315-0'},
'SlrSinexDataHandling2Files': { 'name': 'SlrSinexDataHandling2Files', 'key': 'SlrSinexDataHandling2Files', 'description': 'Converts SLR range and time bias from ILRS_Data_Handling_File_xxxx.xx.xx.snx provided at https://cddis.nasa.gov/archive/slr/products/resource/ . The range and time bias values can be used in Converts SLR range and time bias from in SlrProcessing .', 'config_table': 'outputfileRangeBiasStation filename MISCVALUE [m] outputfileRangeBiasStationSatellite filename MISCVALUE [m] outputfileTimeBias filename MISCVALUES(bias [s], drift [s/d]) variableLoopStation string variable name for station loop variableLoopSatellite string variable name for satellite loop inputfileSinex filename SINEX file (.snx) inputfileSatelliteId filename table SP3 and satellite name stationName string convert only these stations', 'display_text': 'Converts SLR range and time bias from ILRS_Data_Handling_File_xxxx.xx.xx.snx provided at https://cddis.nasa.gov/archive/slr/products/resource/. The range and time bias values can be used in parametrization:rangeBiasXxxApriori in SlrProcessing.'},
'Sp3Format2Orbit': { 'name': 'Sp3Format2Orbit', 'key': 'Sp3Format2Orbit', 'description': 'Read orbits from and write an instrument file (ORBIT) . The additional outputfileClock is an instrument file (MISCVALUE) and outputfileCovariance is an instrument file (COVARIANCE3D) . With satelliteIdentifier a single satellite can be selected if the inputfile s contain more than one satellites. If satelliteIdentifier is empty the first satellite is taken. All satellites can be selected with satelliteIdentifier = <all> . In this case the identifier is appended to each output file. If Read orbits from is provided the data are transformed from terrestrial (TRF) to celestial reference frame (CRF). Since SP3 orbits often use the center of Earth as a reference, a correction from center of Earth to center of mass can be applied to the orbits by providing Read orbits from (e.g. ocean tides). See also Orbit2Sp3Format .', 'config_table': 'outputfileOrbit filename outputfileClock filename outputfileCovariance filename 3x3 epoch covariance satelliteIdentifier string e.g. L09 for GRACE A, empty: take first satellite, : identifier is appended to each file earthRotation earthRotationType rotation from TRF to CRF gravityfield gravityfieldType degree 1 fluid mantle for CM2CE correction (SP3 orbits should be in center of Earth) inputfile filename orbits in SP3 format', 'display_text': 'Read orbits from SP3 format and write an instrument file (ORBIT). The additional outputfileClock is an instrument file (MISCVALUE) and outputfileCovariance is an instrument file (COVARIANCE3D).
With satelliteIdentifier a single satellite can be selected if the inputfiles contain more than one satellite. If satelliteIdentifier is empty the first satellite is taken. All satellites can be selected with satelliteIdentifier=<all>. In this case the identifier is appended to each output file.
If earthRotation is provided the data are transformed from terrestrial (TRF) to celestial reference frame (CRF). Since SP3 orbits often use the center of Earth as a reference, a correction from center of Earth to center of mass can be applied to the orbits by providing gravityfield (e.g. ocean tides).
See also Orbit2Sp3Format.'},
'StarCamera2GraceL1b': { 'name': 'StarCamera2GraceL1b', 'key': 'StarCamera2GraceL1b', 'description': 'This program converts orientation data measured by a star camera (SRF to CRF) from the GROOPS format instrument file (STARCAMERA) to the GRACE SDS format (SCA1B). It reads one inputfileStarCamera and optionally one inputfileStarCameraFlags containing MISCVALUES (sca id, qual rss, qualflg), and writes one SDS output file. The text file inputfileHeader is placed at the beginning of the outputfile . The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin} , | {epochmax} , and {epochcount} . See also GraceL1b2StarCamera .', 'config_table': 'outputfile filename SCA1B inputfileHeader filename YAML Header, {epochmin}, {epochmax}, {epochcount} available inputfileStarCamera filename STARCAMERA inputfileStarCameraFlags filename MISCVALUES(sca_id, qual_rss, qualflg) satelliteId string A, B, C or D', 'display_text': 'This program converts orientation data measured by a star camera (SRF to CRF) from the GROOPS format instrument file (STARCAMERA) to the GRACE SDS format (SCA1B).
It reads one inputfileStarCamera and optionally one inputfileStarCameraFlags containing MISCVALUES(sca_id, qual_rss, qualflg), and writes one SDS output file.
The text file inputfileHeader is placed at the beginning of the outputfile. The text parser is applied so that all variables can be used. In addition, the times of the data are available with the variables {epochmin}, {epochmax}, and {epochcount}.
See also GraceL1b2StarCamera.'},
'StarCamera2Orbex': { 'name': 'StarCamera2Orbex', 'key': 'StarCamera2Orbex', 'description': 'Converts satellite attitude from instrument file (STARCAMERA) to (quaternions). If Converts satellite attitude from is provided, the output file contains quaternions for rotation from TRF to satellite body frame (IGS/ORBEX convention), otherwise the rotation is from CRF to satellite body frame. See also GnssOrbex2StarCamera , SimulateStarCameraGnss .', 'config_table': 'outputfileOrbex filename ORBEX file satellite sequence inputfileStarCamera filename identifier string string identifier (e.g. GNSS PRN: G01) description string e.g. BLOCK IIR-B, GRACE earthRotation earthRotationType rotate data into Earth-fixed frame timeSeries timeSeriesType resample to these epochs (otherwise input file epochs are used) interpolationDegree uint for attitude and Earth rotation interpolation description string description of file contents createdBy string name of agency inputData string description of input data (see ORBEX description) contact string email address referenceFrame string reference frame used in file comment string', 'display_text': 'Converts satellite attitude from instrument file (STARCAMERA) to ORBEX file format (quaternions).
If earthRotation is provided, the output file contains quaternions for rotation from TRF to satellite body frame (IGS/ORBEX convention), otherwise the rotation is from CRF to satellite body frame.
See also GnssOrbex2StarCamera, SimulateStarCameraGnss.'},
'Swarm2Starcamera': { 'name': 'Swarm2Starcamera', 'key': 'Swarm2Starcamera', 'description': 'This program reads SWARM star camera data given in the cdf format and before converted to an ascii file using the program cdfexport provided by the Goddard Space Flight Center ( http://cdf.gsfc.nasa.gov/ ).', 'config_table': 'outputfileStarCamera filename earthRotation earthRotationType inputfile filename', 'display_text': 'This program reads SWARM star camera data given in the cdf format and before converted to an ascii file using the program cdfexport provided by the Goddard Space Flight Center (http://cdf.gsfc.nasa.gov/).'},
'TerraSarTandem2Orbit': { 'name': 'TerraSarTandem2Orbit', 'key': 'TerraSarTandem2Orbit', 'description': 'This program reads in TerraSar-X or Tandem-X orbits in the special CHORB format and takes the appropriate time frame as stated in the document header. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-002.pdf', 'config_table': 'outputfileOrbit filename earthRotation earthRotationType inputfile filename orbits in CHORB format', 'display_text': 'This program reads in TerraSar-X or Tandem-X orbits in the special CHORB format and takes the appropriate time frame as stated in the document header. A description of the format can be found under: http://op.gfz-potsdam.de/champ/docs_CHAMP/CH-GFZ-FD-002.pdf'},
'TerraSarTandem2StarCamera': { 'name': 'TerraSarTandem2StarCamera', 'key': 'TerraSarTandem2StarCamera', 'description': 'This program reads in TerraSar-X or Tandem-X star camera data given in the special format.', 'config_table': 'outputfileStarCamera filename inputfile filename', 'display_text': 'This program reads in TerraSar-X or Tandem-X star camera data given in the special format.'},
'Tle2Orbit': { 'name': 'Tle2Orbit', 'key': 'Tle2Orbit', 'description': 'This program computes the This program computes the from two-line elements (TLE/3LE) as can be found at e.g. http://celestrak.org/NORAD/elements/ . The first satellite in the input file that matches the wildcard of satelliteName is used. If more records with exactly the same name are found, the one with the closest reference epoch is used for each point in the This program computes the . The program uses the Simplified General Perturbation (SGP) model. More information can be found in the Revisiting Spacetrack Report 3 by Vallado et al. 2006.', 'config_table': 'outputfileOrbit filename inputfileTLE filename two line elements (TLE/3LE) satelliteName string first name of wildcard match is used timeSeries timeSeriesType output orbit at these times earthRotation earthRotationType rotation to CRF', 'display_text': 'This program computes the outputfileOrbit from two-line elements (TLE/3LE) as can be found at e.g. http://celestrak.org/NORAD/elements/. The first satellite in the input file that matches the wildcard of satelliteName is used. If more records with exactly the same name are found, the one with the closest reference epoch is used for each point in the timeSeries.
The program uses the Simplified General Perturbation (SGP) model. More information can be found in the Revisiting Spacetrack Report 3 by Vallado et al. 2006.'},
'ViennaMappingFunctionGrid2File': { 'name': 'ViennaMappingFunctionGrid2File', 'key': 'ViennaMappingFunctionGrid2File', 'description': 'This program converts the gridded time series of the Vienna Mapping Functions (VMF) into the GROOPS file format . Gridded VMF data is available at: https://vmf.geo.tuwien.ac.at/trop_products/GRID/', 'config_table': 'outputfileVmfCoefficients filename inputfile filename files must be given for each point in time timeSeries timeSeriesType times of input files deltaLambda angle [deg] sampling in longitude deltaPhi angle [deg] sampling in latitude isCellRegistered boolean grid points represent cells (VMF3), not grid corners (VMF1)', 'display_text': 'This program converts the gridded time series of the Vienna Mapping Functions (VMF) into the GROOPS file format.
Gridded VMF data is available at: https://vmf.geo.tuwien.ac.at/trop_products/GRID/'},
'ViennaMappingFunctionStation2File': { 'name': 'ViennaMappingFunctionStation2File', 'key': 'ViennaMappingFunctionStation2File', 'description': 'Converts Vienna Mapping Functions (VMF) station time series into GROOPS file format . Station-wise VMF data for GNSS is available at: https://vmf.geo.tuwien.ac.at/trop_products/GNSS/', 'config_table': 'outputfileVmfCoefficients filename inputfileStationInfo filename inputfileStation filename inputfileVmf filename', 'display_text': 'Converts Vienna Mapping Functions (VMF) station time series into GROOPS file format.
Station-wise VMF data for GNSS is available at: https://vmf.geo.tuwien.ac.at/trop_products/GNSS/'},
'GnssAttitude2Orbex': { 'name': 'GnssAttitude2Orbex', 'key': 'GnssAttitude2Orbex', 'description': 'DEPRECATED since 2024-11-30. Please use StarCamera2Orbex instead.', 'config_table': 'outputfileOrbex filename ORBEX file inputfileTransmitterList filename ASCII list with transmitter PRNs inputfileAttitude filename instrument file containing attitude variablePrn string loop variable for PRNs from transmitter list timeSeries timeSeriesType resample to these epochs (otherwise input file epochs are used) earthRotation earthRotationType rotate data into Earth-fixed frame interpolationDegree uint for attitude and Earth rotation interpolation description string description of file contents createdBy string name of agency inputData string description of input data (see ORBEX description) contact string email address referenceFrame string reference frame used in file comment string', 'display_text': 'DEPRECATED since 2024-11-30. Please use StarCamera2Orbex instead.'},
'GnssPrn2SvnBlockVariables': { 'name': 'GnssPrn2SvnBlockVariables', 'key': 'GnssPrn2SvnBlockVariables', 'description': 'DEPRECATED since 2024-02-12. This program no longer works! Setup up a DEPRECATED since 2024-02-12. This program no longer works! instead with DEPRECATED since 2024-02-12. This program no longer works! : the old inputfileTransmitterInfo equipmentType = gnssAntenna variableLoopName = block variableLoopSerial = svn variableLoopTimeStart = svnTimeStart variableLoopTimeEnd = svnTimeEnd DEPRECATED since 2024-02-12. This program no longer works! expression = (svnTimeStart <= time) && (time < svnTimeEnd) Attribute this loop to programs, which uses the variables.', 'config_table': '', 'display_text': 'DEPRECATED since 2024-02-12. This program no longer works!
Attribute this loop to programs, which use the variables.'},
'GnssStationInfoCreate': { 'name': 'GnssStationInfoCreate', 'key': 'GnssStationInfoCreate', 'description': 'DEPRECATED since 2024-12-02. Please use PlatformCreate instead.', 'config_table': 'outputfileStationInfo filename markerName string markerNumber string comment string approxPositionX double [m] in TRF approxPositionY double [m] in TRF approxPositionZ double [m] in TRF antenna sequence name string serial string radome string comment string timeStart time timeEnd time positionX double [m] ARP in north, east, up or vehicle system positionY double [m] ARP in north, east, up or vehicle system positionZ double [m] ARP in north, east, up or vehicle system rotationX angle [degree] from local/vehicle to left-handed antenna system rotationY angle [degree] from local/vehicle to left-handed antenna system rotationZ angle [degree] from local/vehicle to left-handed antenna system flipX boolean flip x-axis (after rotation) flipY boolean flip y-axis (after rotation) flipZ boolean flip z-axis (after rotation) receiver sequence name string serial string version string comment string timeStart time timeEnd time referencePoint sequence e.g. center of mass in satellite frame comment string xStart double [m] in north, east, up or vehicle system yStart double linear motion between start and end zStart double xEnd double [m] in north, east, up or vehicle system yEnd double linear motion between start and end zEnd double timeStart time timeEnd time', 'display_text': 'DEPRECATED since 2024-12-02. Please use PlatformCreate instead.'},
'GridRectangular2NetCdf': { 'name': 'GridRectangular2NetCdf', 'key': 'GridRectangular2NetCdf', 'description': 'DEPRECATED since 2023-07-06. Please use GriddedData2NetCdf or GriddedDataTimeSeries2NetCdf instead.', 'config_table': 'outputfileNetCdf filename file name of NetCDF output inputfileGridRectangular filename input grid sequence times timeSeriesType values for time axis (COARDS specification) dataVariable sequence metadata for data variables selectDataField uint input data column name string netCDF variable name dataType choice double float int attribute choice netCDF attributes text sequence name string value string value sequence name string value double dataType choice double float int globalAttribute choice additional meta data text sequence name string value string value sequence name string value double dataType choice double float int', 'display_text': 'DEPRECATED since 2023-07-06. Please use GriddedData2NetCdf or GriddedDataTimeSeries2NetCdf instead.'},
'NetCdf2GridRectangular': { 'name': 'NetCdf2GridRectangular', 'key': 'NetCdf2GridRectangular', 'description': 'DEPRECATED since 2023-07-06. Please use NetCdf2GriddedData or NetCdf2GriddedDataTimeSeries instead.', 'config_table': 'outputfileGridRectangular filename One grid for each epoch in the NetCDF file is written. Use loopTimeVariable as template. loopTimeVariable string inputfileNetCdf filename variableNameLongitude string name of NetCDF variable variableNameLatitude string name of NetCDF variable variableNameTime string name of NetCDF variable (leave blank for static grids) variableNameData string name of NetCDF variable R double reference radius for ellipsoidal coordinates inverseFlattening double reference flattening for ellipsoidal coordinates', 'display_text': 'DEPRECATED since 2023-07-06. Please use NetCdf2GriddedData or NetCdf2GriddedDataTimeSeries instead.'},
'Sinex2StationPosition': { 'name': 'Sinex2StationPosition', 'key': 'Sinex2StationPosition', 'description': 'DEPRECATED since 2023-02-16. Please use Sinex2StationPositions instead.', 'config_table': 'outputfileInstrument filename loop variable is replaced with station name (e.g. wtzz) inputfileSinex filename SINEX file (.snx or .ssc) inputfileDiscontinuities filename discontinuities file per station; loop variable is replaced with station name (e.g. wtzz) variableLoopStation string variable name for station loop stationName string convert only these stations timeSeries timeSeriesType compute positions for these epochs based on velocity extrapolateForward boolean also compute positions for epochs after last interval defined in SINEX file extrapolateBackward boolean also compute positions for epochs before first interval defined in SINEX file', 'display_text': 'DEPRECATED since 2023-02-16. Please use Sinex2StationPositions instead.'},
'Sinex2StationPostSeismicDeformation': { 'name': 'Sinex2StationPostSeismicDeformation', 'key': 'Sinex2StationPostSeismicDeformation', 'description': 'DEPRECATED since 2023-02-16. Please use Sinex2StationPositions instead.', 'config_table': 'outputfileInstrument filename deformation time series inputfileSinex filename ITRF post-seismic deformation SINEX file timeSeries timeSeriesType compute deformation for these epochs stationName string localLevelFrame boolean output in North, East, Up local-level frame', 'display_text': 'DEPRECATED since 2023-02-16. Please use Sinex2StationPositions instead.'},
'autoregressiveModelSequenceType': { 'name': 'autoregressiveModelSequenceType', 'key': 'autoregressiveModelSequenceType', 'description': 'Represents a sequence of multivariate autoregressive (AR) models with increasing order . The AR models should be stored as matrix file in the GROOPS definition of AR models . The required AR models can be computed with CovarianceMatrix2AutoregressiveModel , and passed to this class through inputfileAutoregressiveModel in increasing order. The main purpose of AutoregressiveModelSequence is to use AR models of the form to create pseudo-observation equations with used to constrain high-frequency temporal gravity field variations (see KalmanSmootherLeastSquares , NormalsBuildShortTimeStaticLongTime , PreprocessingSst ). The corresponding normal equation coefficient matrix is given by and if all AR models are estimated from the same sample its inverse is a block-Toeplitz covariance matrix which can be computed using AutoregressiveModel2CovarianceMatrix . A detailed description with applications can be found in: Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties. J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1', 'config_table': 'autoregressiveModelSequenceType sequence inputfileAutoregressiveModel filename matrix file containing an AR model sigma0 double a-priori sigma for white noise covariance', 'display_text': 'Represents a sequence of multivariate autoregressive (AR) models with increasing order $p$. The AR models should be stored as matrix file in the GROOPS definition of AR models. The required AR models can be computed with CovarianceMatrix2AutoregressiveModel, and passed to this class through inputfileAutoregressiveModel in increasing order.
The corresponding normal equation coefficient matrix is given by \\[ \\label{eq:ar-normals} \\bar{\\mathbf{\\Phi}}^T\\bar{\\mathbf{\\Sigma}}^{-1}_{\\bar{\\mathbf{w}}}\\bar{\\mathbf{\\Phi}} \\]and if all AR models are estimated from the same sample its inverse is a block-Toeplitz covariance matrix \\[ (\\mathbf{\\Sigma}_{\\mathbf{y}_m})_{ij} = \\begin{cases} \\mathbf{\\Sigma}(|j-i|) & \\text{for } i \\leq j \\\\ \\mathbf{\\Sigma}(|j-i|))^T & \\text{otherwise} \\end{cases}, \\]which can be computed using AutoregressiveModel2CovarianceMatrix.
A detailed description with applications can be found in: Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties. J Geod 93, 2543–2552 (2019). https://doi.org/10.1007/s00190-019-01314-1'},
'borderType': { 'name': 'borderType', 'key': 'borderType', 'description': 'With this class you can select one or more region on the surface of the Earth. In every instance of Border you can choose whether the specific region is excluded from the overall result with the switch exclude . To determine whether a specific point will be used furthermore the following algorithm will be applied: In a first step all points are selected if first border excludes points otherwise all points excluded. When every point will be tested for each instance of border from top to bottom. If the point is not in the selected region nothing happens. Otherwise it will included or excluded depending on the switch exclude . First Example: The border excludes all continental areas. The result are points on the oceans only. Second Example: First border describes the continent north america. The next borders excludes the great lakes and the last border describes Washington island. In this configuration points are selected if they are inside north america but not in the area of the great lakes. But if the point is on Washington island it will be included again. The region is restricted along lines of geographical coordinates. minPhi and maxPhi describe the lower and the upper bound of the region. minLambda and maxLambda define the left and right bound. The region is defined by a spherical cap with the center given in geographical coordinates longitude ( lambdaCenter ) and latitude ( phiCenter ). The radius of the cap is given as aperture angle psi . The region is defined by containing one or more polygons given in longitude and latitude. An additional buffer around the polygon can be defined. 
Use a negative value to shrink the polygon area.', 'config_table': 'minLambda angle maxLambda angle minPhi angle maxPhi angle exclude boolean dismiss points inside lambdaCenter angle longitude of the center of the cap phiCenter angle latitude of the center of the cap psi angle aperture angle (radius) exclude boolean dismiss points inside inputfilePolygon filename buffer double buffer around polygon [km], <0: inside exclude boolean dismiss points inside', 'display_text': 'With this class you can select one or more regions on the surface of the Earth. In every instance of Border you can choose whether the specific region is excluded from the overall result with the switch exclude. To determine whether a specific point will be used furthermore the following algorithm will be applied: In a first step all points are selected if first border excludes points otherwise all points excluded. Then every point will be tested for each instance of border from top to bottom. If the point is not in the selected region nothing happens. Otherwise it will be included or excluded depending on the switch exclude.
First Example: The border excludes all continental areas. The result are points on the oceans only.
Second Example: First border describes the continent north america. The next borders excludes the great lakes and the last border describes Washington island. In this configuration points are selected if they are inside north america but not in the area of the great lakes. But if the point is on Washington island it will be included again.
Rectangle
The region is restricted along lines of geographical coordinates. minPhi and maxPhi describe the lower and the upper bound of the region. minLambda and maxLambda define the left and right bound.
Cap
The region is defined by a spherical cap with the center given in geographical coordinates longitude (lambdaCenter) and latitude (phiCenter). The radius of the cap is given as aperture angle psi.
Polygon
The region is defined by inputfilePolygon containing one or more polygons given in longitude and latitude. An additional buffer around the polygon can be defined. Use a negative value to shrink the polygon area.'},
'conditionType': { 'name': 'conditionType', 'key': 'conditionType', 'description': 'Test for conditions. See Loop and conditions for usage. Check for a file or directory existing. Supports wildcards * for any number of characters and ? for exactly one character. Files smaller than minSize are treated as non-existent. Execute command and check success. Evaluate expression. Evaluate elements of a based on an expression. If all = yes , all elements of the matrix must evaluate to true for the condition to be fulfilled, otherwise any element evaluating to true is sufficient. Evaluate if matrix (or instrument ) file is empty/has zero size. Determines if there is a match between a pattern and some subsequence in a string . Supports wildcards * for any number of characters and ? for exactly one character. If isRegularExpression is set, pattern is interpreted as a regular expression instead. In any case, the text parser is applied beforehand. Determines if a pattern matches the entire string . Supports wildcards * for any number of characters and ? for exactly one character. If isRegularExpression is set, pattern is interpreted as a regular expression instead. In any case, the text parser is applied beforehand. All conditions must be met (with short-circuit evaluation). One of the conditions must be met (with short-circuit evaluation). The result of the condition is inverted.', 'config_table': 'file filename supports wildcards: * and ? minimumSize uint minimum file size in byte. command filename silently boolean without showing the output. expression expression matrix matrixGeneratorType expression is evaluated for each element of resulting matrix expression expression (variable: data) evaluated for each element all boolean all (=yes)/any (=no) elements must evaluate to true inputfileMatrix filename string filename should contain a {variable} pattern filename supports wildcards: * and ? 
isRegularExpression boolean pattern is a regular expression caseSensitive boolean treat lower and upper case as distinct string filename should contain a {variable} pattern filename supports wildcards: * and ? isRegularExpression boolean pattern is a regular expression caseSensitive boolean treat lower and upper case as distinct condition conditionType condition conditionType condition conditionType', 'display_text': 'Test for conditions. See Loop and conditions for usage.
FileExist
Check for a file or directory existing. Supports wildcards * for any number of characters and ? for exactly one character. Files smaller than minSize are treated as non-existent.
Command
Execute command and check success.
Expression
Evaluate expression.
Matrix
Evaluate elements of a matrix based on an expression. If all=yes, all elements of the matrix must evaluate to true for the condition to be fulfilled, otherwise any element evaluating to true is sufficient.
Determines if there is a match between a pattern and some subsequence in a string. Supports wildcards * for any number of characters and ? for exactly one character. If isRegularExpression is set, pattern is interpreted as a regular expression instead. In any case, the text parser is applied beforehand.
StringMatchPattern
Determines if a pattern matches the entire string. Supports wildcards * for any number of characters and ? for exactly one character. If isRegularExpression is set, pattern is interpreted as a regular expression instead. In any case, the text parser is applied beforehand.
And
All conditions must be met (with short-circuit evaluation).
Or
One of the conditions must be met (with short-circuit evaluation).
Not
The result of the condition is inverted.'},
'covariancePodType': { 'name': 'covariancePodType', 'key': 'covariancePodType', 'description': 'Provides arc-wise covariance matrices for precise orbit data. Temporal correlations are modeled in the orbit system (along, cross, radial). The provides temporal covariance functions for each axis. From the diagonal matrix for each time step the Toeplitz covariance matrix for an arc is constructed The epoch-wise covariance matrices given by are eigenvalue-decomposed where is an orthogonal matrix and diagonal. This is used to split the covariances matrices and to compose a block diagonal matrix for an arc The complete covariance matrix of an arc is given by where sigma is an overall factor and the arc specific factors can be provided with . The last matrix can be used to downweight outliers in single epochs and will be added if is provided.', 'config_table': 'covariancePodType sequence sigma double general variance factor inputfileSigmasPerArc filename different accuracies for each arc (multiplied with sigma) inputfileSigmasPerEpoch filename different accuracies for each epoch (added) inputfileCovarianceFunction filename covariances in time for along, cross, and radial direction inputfileCovariancePodEpoch filename 3x3 epoch-wise covariances', 'display_text': 'Provides arc-wise covariance matrices for precise orbit data. Temporal correlations are modeled in the orbit system (along, cross, radial). The inputfileCovarianceFunction provides temporal covariance functions for each axis. 
From the diagonal matrix for each time step \\[ Cov_{3\\times3}(t) = \\text{diag}(cov_x(t), cov_y(t), cov_z(t)) \\]the Toeplitz covariance matrix for an arc is constructed \\[ \\M C = \\begin{pmatrix} Cov(t_0) & Cov(t_1) & \\cdots & & & \\\\ Cov(t_1) & Cov(t_0) & Cov(t_1) & \\cdots & & \\\\ \\cdots & Cov(t_1) & Cov(t_0) & Cov(t_1) & \\cdots & \\\\ & \\cdots & \\ddots & \\ddots & \\ddots & \\cdots \\\\ \\end{pmatrix} \\] The epoch-wise $3\\times3$ covariance matrices given by inputfileCovariancePodEpoch are eigenvalue-decomposed \\[ \\M C_{3\\times3}(t_i) = \\M Q \\M\\Lambda \\M Q^T, \\]where $\\M Q$ is an orthogonal matrix and $\\M\\Lambda$ diagonal. This is used to split the covariance matrices \\[ \\M C_{3\\times3}(t_i) = \\M D(t_i) \\M D(t_i)^T = (\\M Q \\M\\Lambda^{1/2} \\M Q^T)(\\M Q \\M\\Lambda^{1/2} \\M Q^T)^T, \\]and to compose a block diagonal matrix for an arc \\[ \\M D = \\text{diag}(\\M D(t_1), \\M D(t_2), \\ldots, \\M D(t_n)). \\] The complete covariance matrix of an arc is given by \\[ \\M C_{arc} = \\sigma_0^2 \\sigma_{arc}^2 \\M D \\M C \\M D^T + \\text{diag}(\\sigma_1^2\\M I_{3\\times3}, \\sigma_2^2\\M I_{3\\times3}, \\ldots, \\sigma_n^2\\M I_{3\\times3}) \\]where sigma $\\sigma_0$ is an overall factor and the arc specific factors $\\sigma_{arc}$ can be provided with inputfileSigmasPerArc. The last matrix can be used to downweight outliers in single epochs and will be added if inputfileSigmasPerEpoch is provided.'},
'covarianceSstType': { 'name': 'covarianceSstType', 'key': 'covarianceSstType', 'description': 'Provides arc-wise covariance matrices for satellite-to-satellite observations (SST). The provides a temporal covariance function. From it the Toeplitz covariance matrix is constructed The complete covariance matrix of an arc is given by where sigma is an overall factor and the arc specific factors can be provided with . The second term describes general covariance matrices for each arc together with the factors from sigmasCovarianceMatrixArc . The last matrix can be used to downweight outliers in single epochs and will be added if is provided.', 'config_table': 'covarianceSstType sequence sigma double general variance factor inputfileSigmasPerArc filename different accuracies for each arc (multiplied with sigma) inputfileSigmasPerEpoch filename different accuracies for each epoch (added) inputfileCovarianceFunction filename covariance function in time inputfileCovarianceMatrixArc filename one matrix file per arc. Use {arcNo} as template sigmasCovarianceMatrixArc filename vector with one sigma for each covarianceMatrixArc', 'display_text': 'Provides arc-wise covariance matrices for satellite-to-satellite observations (SST). The inputfileCovarianceFunction provides a temporal covariance function. From it the Toeplitz covariance matrix is constructed \\[ \\M C = \\begin{pmatrix} cov(t_0) & cov(t_1) & \\cdots & & & \\\\ cov(t_1) & cov(t_0) & cov(t_1) & \\cdots & & \\\\ \\cdots & cov(t_1) & cov(t_0) & cov(t_1) & \\cdots & \\\\ & \\cdots & \\ddots & \\ddots & \\ddots & \\cdots \\\\ \\end{pmatrix} \\\\ \\] The complete covariance matrix of an arc is given by \\[ \\M C_{arc} = \\sigma_0^2 \\sigma_{arc}^2 \\M C + \\sigma_{S,arc}^2 \\M S_{arc}+ \\text{diag}(\\sigma_1^2, \\sigma_2^2, \\ldots, \\sigma_n^2) \\]where sigma $\\sigma_0$ is an overall factor and the arc specific factors $\\sigma_{arc}$ can be provided with inputfileSigmasPerArc. 
The second term describes general covariance matrices for each arc inputfileCovarianceMatrixArc together with the factors $\\sigma_{S,arc}$ from sigmasCovarianceMatrixArc. The last matrix can be used to downweight outliers in single epochs and will be added if inputfileSigmasPerEpoch is provided.
'},
'digitalFilterType': { 'name': 'digitalFilterType', 'key': 'digitalFilterType', 'description': 'Digital filter implementation for the filtering of equally spaced time series. This class implements the filter equations as where is the autoregressive (AR) order and is the moving average (MA) order. Note that the MA part can also be non-causal. The characteristics of a filter cascade can be computed by the programs DigitalFilter2FrequencyResponse and DigitalFilter2ImpulseResponse . To apply a filter cascade to a time series (or an instrument file ) use InstrumentFilter . Each filter can be applyed in forward and backward direction by setting backwardDirection . If the same filter is applied in both directions, the combined filter has zero phase and the squared magnitude response. Setting inFrequencyDomain to true applies the transfer function of the filter to the DFT of the input and synthesizes the result, i.e.: This is equivalent to setting padType to periodic . To reduce warmup effects, the input time series can be padded by choosing a padType : none : no padding is applied zero : zeros are appended at the beginning and end of the input time series constant : the beginning of the input time series is padded with the first value, the end is padded with the last value periodic : periodic continuation of the input time series (i.,e. the beginning is padded with the last epochs and the end is padded with the first epochs) symmetric : beginning and end are reflected around the first and last epoch respectively Moving average (boxcar) filter. For odd lengths, this filter is symmetric and has therefore no phase shift. For even lengths, a phase shift of half a cycle is introduced. Moving median filter of length . The filter output at epoch is the median of the set start at to . The filter length should be uneven to avoid a phase shift. Symmetric MA filter for numerical differentiation using polynomial approximation. 
The input time series is approximated by a moving polynomial of degree polynomialDegree , by solving for each time step ( is the sampling of the time series). The filter coefficients for the -th derivative are obtained by taking the appropriate row of the inverse coefficient matrix : The polynomialDegree should be even if no phase shift should be introduced. Numerical integration using polynomial approximation. The input time series is approximated by a moving polynomial of degree polynomialDegree by solving for each time step ( is the sampling of the time series). The numerical integral for each time step is approximated by the center interval of the estimated polynomial. polynomialDegree should be even to avoid a phase shift. Correlation ( ) of corr is introduced into the time series: Low pass and differentation filter as used for GRACE KBR and ACC data in the Level1A processing. Digital implementation of the Butterworth filter. The design of the filter is done by modifying the analog (continuous time) transfer function, which is then transformed into the digital domain by using the bilinear transform. The filter coefficients are then determined by a least squares adjustment in time domain. The filterType can be lowpass , highpass , where one cutoff frequency has to be specified, and bandpass and bandstop where to cutoff frequencies have to be specified. Cutoff frequencies must be given as normalized frequency . For a cutoff frequency of 30 mHz for a time series sampled with 5 seconds gives a normalized frequency of . Read filter coefficients of from a coefficient file. One column might define the index of the coefficients and in the other columns. Filter representation of a wavelet. Implemented after Christian Siemes\' dissertation, page 106. Moving average decorrelation filter based on eigendecomposition of a Toeplitz covariance matrix. Lag operator in digital filter representation. Removes the filtered signal from the input, i.e. 
the input is passed through a with a frequency response of .', 'config_table': 'length uint number of epochs in averaging operator inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges length uint length of the moving window [epochs] padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges polynomialDegree uint degree of approximation polynomial derivative uint take kth derivative sampling double assumed time step between points padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges polynomialDegree uint degree of approximation polynomial sampling double assumed time step between points padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges correlation double correlation backwardDirection boolean apply filter in backward direction inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges rawDataRate double sampling frequency in Hz (fs). 
convolutionNumber uint number of self convolutions of the filter kernel fitInterval double length of the filter kernel [seconds] lowPassBandwith double target low pass bandwidth normFrequency double norm filter at this frequency [Hz] (default: GRACE dominant (J2) signal frequency) reduceQuadraticFit boolean remove->filter->restore quadratic fit derivative choice derivative1st range rate derivative2nd range acceleration inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges order uint filter order type choice filter type lowpass sequence Wn double normalized cutoff frequency (f_c / f_nyq) highpass sequence Wn double normalized cutoff frequency (f_c / f_nyq) bandpass sequence Wn1 double lower normalized cutoff frequency (f_c / f_nyq) Wn2 double upper normalized cutoff frequency (f_c / f_nyq) bandstop sequence Wn1 double lower normalized cutoff frequency (f_c / f_nyq) Wn2 double upper normalized cutoff frequency (f_c / f_nyq) backwardDirection boolean apply filter in backward direction inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges inputfileMatrix filename matrix with filter coefficients index expression index of coefficients (input columns are named data0, data1, ...) bn expression MA coefficients (moving average) (input columns are named data0, data1, ...) an expression AR coefficients (autoregressive) (input columns are named data0, data1, ...) 
backwardDirection boolean apply filter in backward direction inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges inputfileWavelet filename wavelet coefficients type choice filter type lowpass highpass level uint compute filter for specific decomposition level backwardDirection boolean apply filter in backward direction inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges notchFrequency double normalized notch frequency w_n = (f_n/f_nyq) bandWidth double bandwidth at -3db. Quality factor of filter Q = w_n/bw backwardDirection boolean apply filter in backward direction inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges inputfileCovarianceFunction filename covariance function of time series inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges lag int lag epochs: 1 (lag); -1 (lead) inFrequencyDomain boolean apply filter in frequency domain padType choice none no padding is applied zero zero padding constant pad using first and last value periodic periodic continuation of matrix symmetric symmetric continuation around the matrix edges filter digitalFilterType remove filter output from input signal', 'display_text': 'Digital filter implementation for the filtering of 
equally spaced time series. This class implements the filter equations as \\[\\label{digitalFilterType:arma} \\sum_{l=0}^Q a_l y_{n-l} = \\sum_{k=-p_0}^{P-p_0-1} b_k x_{n-k}, \\hspace{25pt} a_0 = 1, \\]where $Q$ is the autoregressive (AR) order and $P$ is the moving average (MA) order. Note that the MA part can also be non-causal. The characteristics of a filter cascade can be computed by the programs DigitalFilter2FrequencyResponse and DigitalFilter2ImpulseResponse. To apply a filter cascade to a time series (or an instrument file) use InstrumentFilter. Each filter can be applied in forward and backward direction by setting backwardDirection. If the same filter is applied in both directions, the combined filter has zero phase and the squared magnitude response. Setting inFrequencyDomain to true applies the transfer function of the filter to the DFT of the input and synthesizes the result, i.e.: \\[ y_n = \\mathcal{F}^{-1}\\{H\\cdot\\mathcal{F}\\{x_n\\}\\}. \\]This is equivalent to setting padType to periodic.
To reduce warmup effects, the input time series can be padded by choosing a padType:
none: no padding is applied
zero: zeros are appended at the beginning and end of the input time series
constant: the beginning of the input time series is padded with the first value, the end is padded with the last value
periodic: periodic continuation of the input time series (i.e. the beginning is padded with the last epochs and the end is padded with the first epochs)
symmetric: beginning and end are reflected around the first and last epoch respectively
MovingAverage
Moving average (boxcar) filter. For odd lengths, this filter is symmetric and has therefore no phase shift. For even lengths, a phase shift of half a cycle is introduced.
Moving median filter of length $n$. The filter output at epoch $k$ is the median of the set from $k-n/2$ to $k+n/2$. The filter length $n$ should be odd to avoid a phase shift.
Derivative
Symmetric MA filter for numerical differentiation using polynomial approximation. The input time series is approximated by a moving polynomial of degree polynomialDegree, by solving \\[ \\begin{bmatrix} x(t_k+\\tau_0) \\\\ \\vdots \\\\ x(t_k+\\tau_M) \\end{bmatrix} = \\begin{bmatrix} 1 & \\tau_0 & \\tau_0^2 & \\cdots & \\tau_0^M \\\\ \\vdots & \\vdots & \\vdots & & \\vdots \\\\ 1 & \\tau_M & \\tau_M^2 & \\cdots & \\tau_M^M \\\\ \\end{bmatrix}%^{-1} \\begin{bmatrix} a_0 \\\\ \\vdots \\\\ a_M \\end{bmatrix} \\qquad\\text{with}\\quad \\tau_j = (j-M/2)\\cdot \\Delta t, \\]for each time step $t_k$ ($\\Delta t$ is the sampling of the time series). The filter coefficients for the $k$-th derivative are obtained by taking the appropriate row of the inverse coefficient matrix $\\mathbf{W}$: \\[ b_n = \\prod_{i=0}^{k-1} (k-i) \\mathbf{w}_{2,:}. \\]The polynomialDegree should be even if no phase shift should be introduced.
Integral
Numerical integration using polynomial approximation. The input time series is approximated by a moving polynomial of degree polynomialDegree by solving \\[ \\begin{bmatrix} x(t_k+\\tau_0) \\\\ \\vdots \\\\ x(t_k+\\tau_M) \\end{bmatrix} = \\begin{bmatrix} 1 & \\tau_0 & \\tau_0^2 & \\cdots & \\tau_0^M \\\\ \\vdots & \\vdots & \\vdots & & \\vdots \\\\ 1 & \\tau_M & \\tau_M^2 & \\cdots & \\tau_M^M \\\\ \\end{bmatrix}%^{-1} \\begin{bmatrix} a_0 \\\\ \\vdots \\\\ a_M \\end{bmatrix} \\qquad\\text{with}\\quad \\tau_j = (j-M/2)\\cdot \\Delta t, \\]for each time step $t_k$ ($\\Delta t$ is the sampling of the time series). The numerical integral for each time step $t_k$ is approximated by the center interval of the estimated polynomial.
polynomialDegree should be even to avoid a phase shift.
Correlation
Correlation ($\\rho$) of corr is introduced into the time series: \\[ y_n = \\rho\\cdot y_{n-1} + \\sqrt{1-\\rho^2}x_n. \\]
GraceLowpass
Low pass and differentiation filter as used for GRACE KBR and ACC data in the Level1A processing.
Butterworth
Digital implementation of the Butterworth filter. The design of the filter is done by modifying the analog (continuous time) transfer function, which is then transformed into the digital domain by using the bilinear transform. The filter coefficients are then determined by a least squares adjustment in time domain.
The filterType can be lowpass, highpass, where one cutoff frequency has to be specified, and bandpass and bandstop where two cutoff frequencies have to be specified. Cutoff frequencies must be given as normalized frequency $w_n = f/f_{\\text{nyq}}$. For example, a cutoff frequency of 30 mHz for a time series sampled with 5 seconds gives a normalized frequency of $0.03/0.1 = 0.3$.
File
Read filter coefficients of \\eqref{digitalFilterType:arma} from a coefficient file. One column might define the index $n$ of the coefficients $a_n$ and $b_n$ in the other columns.
Wavelet
Filter representation of a wavelet.
Notch
Implemented after Christian Siemes\' dissertation, page 106.
Decorrelation
Moving average decorrelation filter based on eigendecomposition of a Toeplitz covariance matrix.
TimeLag
Lag operator in digital filter representation.
ReduceFilterOutput
Removes the filtered signal from the input, i.e. the input is passed through a digitalFilter with a frequency response of $1-H(f)$.'},
'doodson': { 'name': 'doodson', 'key': 'doodson', 'description': 'This is a string which describes a tidal frequency either coded as Doodson number or using Darwin\'s name, e.g. 255.555 or M2. The following names are defined: 055.565 : om1 055.575 : om2 056.554 : sa 056.555 : sa 057.555 : ssa 058.554 : sta 063.655 : msm 065.455 : mm 073.555 : msf 075.555 : mf 083.655 : mstm 085.455 : mtm 093.555 : msq 093.555 : msqm 125.755 : 2q1 127.555 : sig1 127.555 : sigma1 135.655 : q1 137.455 : ro1 137.455 : rho1 145.555 : o1 147.555 : tau1 155.655 : m1 157.455 : chi1 162.556 : pi1 163.555 : p1 164.555 : s1 165.555 : k1 166.554 : psi1 167.555 : fi1 167.555 : phi1 173.655 : the1 173.655 : theta1 175.455 : j1 183.555 : so1 185.555 : oo1 195.455 : v1 225.855 : 3n2 227.655 : eps2 235.755 : 2n2 237.555 : mu2 237.555 : mi2 245.655 : n2 247.455 : nu2 247.455 : ni2 253.755 : gam2 254.556 : alf2 255.555 : m2 256.554 : bet2 257.555 : dlt2 263.655 : la2 263.655 : lmb2 263.655 : lambda2 265.455 : l2 271.557 : 2t2 272.556 : t2 273.555 : s2 274.554 : r2 275.555 : k2 283.655 : ksi2 285.455 : eta2 355.555 : m3 381.555 : t3 382.555 : s3 383.555 : r3 435.755 : n4 445.655 : mn4 455.555 : m4 473.555 : ms4 491.555 : s4 655.555 : m6 855.555 : m8', 'config_table': '', 'display_text': 'This is a string which describes a tidal frequency either coded as Doodson number or using Darwin\'s name, e.g. 255.555 or M2.
The following names are defined:
055.565: om1
055.575: om2
056.554: sa
056.555: sa
057.555: ssa
058.554: sta
063.655: msm
065.455: mm
073.555: msf
075.555: mf
083.655: mstm
085.455: mtm
093.555: msq
093.555: msqm
125.755: 2q1
127.555: sig1
127.555: sigma1
135.655: q1
137.455: ro1
137.455: rho1
145.555: o1
147.555: tau1
155.655: m1
157.455: chi1
162.556: pi1
163.555: p1
164.555: s1
165.555: k1
166.554: psi1
167.555: fi1
167.555: phi1
173.655: the1
173.655: theta1
175.455: j1
183.555: so1
185.555: oo1
195.455: v1
225.855: 3n2
227.655: eps2
235.755: 2n2
237.555: mu2
237.555: mi2
245.655: n2
247.455: nu2
247.455: ni2
253.755: gam2
254.556: alf2
255.555: m2
256.554: bet2
257.555: dlt2
263.655: la2
263.655: lmb2
263.655: lambda2
265.455: l2
271.557: 2t2
272.556: t2
273.555: s2
274.554: r2
275.555: k2
283.655: ksi2
285.455: eta2
355.555: m3
381.555: t3
382.555: s3
383.555: r3
435.755: n4
445.655: mn4
455.555: m4
473.555: ms4
491.555: s4
655.555: m6
855.555: m8
'},
'earthRotationType': { 'name': 'earthRotationType', 'key': 'earthRotationType', 'description': 'This class realizes the transformation between a terrestrial reference frame (TRF) and a celestial reference frame (CRF). This class realizes the transformation by interpolation from file. This file can be created with EarthOrientationParameterTimeSeries . This class realizes the transformation according to the IERS2010 conventions given by the International Earth Rotation and Reference Systems Service (IERS). A file with the earth orientation parameter is needed ( ). This class realizes the transformation according to the IERS2010 conventions given by the International Earth Rotation and Reference Systems Service (IERS). A file with the earth orientation parameter is needed ( ). Includes additional high-frequency EOP models ( ). This class realizes the transformation according to IERS2003 conventions given by the International Earth Rotation and Reference Systems Service (IERS). A file with the earth orientation parameter is needed ( ). The following subroutines are used: BPN2000.f, ERA2000.f, pmsdnut.f, POM2000.f, SP2000.f, T2C2000.f, XYS2000A.f from ftp://maia.usno.navy.mil/conv2000/chapter5/ and orthoeop.f from ftp://maia.usno.navy.mil/conv2000/chapter8/ Very old. The transformation is realized as rotation about the z-axis. The angle is given by the Greenwich Mean Sidereal Time (GMST). Double Tu0 = (timeUTC.mjdInt()-51544.5)/36525.0; Double GMST0 = (6.0/24 + 41.0/(24*60) + 50.54841/(24*60*60)) + (8640184.812866/(24*60*60))*Tu0 + (0.093104/(24*60*60))*Tu0*Tu0 + (-6.2e-6/(24*60*60))*Tu0*Tu0*Tu0; Double r = 1.002737909350795 + 5.9006e-11*Tu0 - 5.9e-15*Tu0*Tu0; GMST = fmod(2*PI*(GMST0 + r * timeUTC.mjdMod()), 2*PI); The transformation is realized as rotation about the z-axis. 
The angle is given by the Earth Rotation Angle (ERA) as const Time T = timeUT1-mjd2time(J2000); ERA = fmod(2*PI*(0.7790572732640 + T.mjdMod() + 0.00273781191135448*T.mjd()), 2*PI); The transformation is realized as rotation about the z-axis. You must specify the angle ( initialAngle ) at time0 and the angular velocity ( angularVelocity ). This class reads quaternions from an instrument file and interpolates to the given time stamp. This class realizes the transformation between the moon-fixed system (Principal Axis System (PA) or Mean Earth System (ME)) and the ICRS according to the JPL ephemeris file.', 'config_table': 'inputfileEOP filename interpolationDegree uint for polynomial interpolation inputfileEOP filename truncatedNutation boolean use truncated nutation model (IAU2006B) inputfileEOP filename inputfileDoodsonEOP filename inputfileEOP filename inputfileEOP filename inputfileNutation filename initialAngle double Angle at time0 [rad] angularVelocity double [rad/s] time0 time inputfileStarCamera filename interpolationDegree uint degree of interpolation polynomial inputfileEphemerides filename librations moonfixedSystem choice PA Principal Axis System ME Mean Earth System', 'display_text': 'This class realizes the transformation between a terrestrial reference frame (TRF) and a celestial reference frame (CRF).
This class realizes the transformation according to the IERS2010 conventions given by the International Earth Rotation and Reference Systems Service (IERS). A file with the earth orientation parameter is needed (inputfileEOP).
Iers2010b
This class realizes the transformation according to the IERS2010 conventions given by the International Earth Rotation and Reference Systems Service (IERS). A file with the earth orientation parameter is needed (inputfileEOP). Includes additional high-frequency EOP models (inputfileDoodsonEOP).
Iers2003
This class realizes the transformation according to IERS2003 conventions given by the International Earth Rotation and Reference Systems Service (IERS). A file with the earth orientation parameter is needed (inputfileEOP).
The transformation is realized as rotation about the z-axis. The angle is given by the Earth Rotation Angle (ERA) as
const Time T = timeUT1-mjd2time(J2000); ERA = fmod(2*PI*(0.7790572732640 + T.mjdMod() + 0.00273781191135448*T.mjd()), 2*PI);
Z-Axis
The transformation is realized as rotation about the z-axis. You must specify the angle (initialAngle) at time0 and the angular velocity (angularVelocity).
StarCamera
This class reads quaternions from an instrument file and interpolates to the given time stamp.
MoonRotation
This class realizes the transformation between the moon-fixed system (Principal Axis System (PA) or Mean Earth System (ME)) and the ICRS according to the JPL ephemeris file.'},
'eclipseType': { 'name': 'eclipseType', 'key': 'eclipseType', 'description': 'Shadowing of satellites by moon and Earth provided as factor between 0 and 1 with 0: full shadow and 1: full sun light. Earth’s penumbra modeling with Solar radiation pressure with Oblateness and Lower Atmospheric Absorption, Refraction, and Scattering (SOLAARS). See Robertson, Robbie. (2015), Highly Physical Solar Radiation Pressure Modeling During Penumbra Transitions (pp. 67-75).', 'config_table': '', 'display_text': 'Shadowing of satellites by moon and Earth provided as factor between $[0,1]$ with 0: full shadow and 1: full sun light.
Conical
SOLAARS
Earth’s penumbra modeling with Solar radiation pressure with Oblateness and Lower Atmospheric Absorption, Refraction, and Scattering (SOLAARS). See Robertson, Robbie. (2015), Highly Physical Solar Radiation Pressure Modeling During Penumbra Transitions (pp. 67-75).'},
'ephemeridesType': { 'name': 'ephemeridesType', 'key': 'ephemeridesType', 'description': 'Ephemerides of Sun, Moon and planets. The coordinate system is defined as center of . Using DExxx ephemerides from NASA Jet Propulsion Laboratory (JPL).', 'config_table': 'inputfileEphemerides filename origin planetType center of coordinate system', 'display_text': 'Ephemerides of Sun, Moon and planets. The coordinate system is defined as center of origin. Using DExxx ephemerides from NASA Jet Propulsion Laboratory (JPL).'},
'forcesType': { 'name': 'forcesType', 'key': 'forcesType', 'description': 'This class provides the forces acting on a satellite. This encompasses , and .', 'config_table': 'forcesType sequence gravityfield gravityfieldType tides tidesType miscAccelerations miscAccelerationsType', 'display_text': 'This class provides the forces acting on a satellite. This encompasses gravityfield, tides and miscAccelerations.'},
'gnssAntennaDefintionListType': { 'name': 'gnssAntennaDefintionListType', 'key': 'gnssAntennaDefintionListType', 'description': 'Provides a list of GnssAntennaDefinitions as used in GnssAntennaDefinitionCreate . Creates a new antenna. Select all or the first antenna from an antenna definition file which matches the wildcards. Select all antennas from an antenna definition file which are used by a station within a defined time interval. With specializeAntenna an individual antenna is created for each different serial number using the general type specific values from file. The azimuth and elevation dependent antenna center variations (patterns) of all antenna s are resampled to a new resolution. This class can be used to separate general antenna patterns for different s. If the antenna s contain only one pattern for all GPS observations on the L1 frequency ( *1*G** ), the patternTypes = C1*G** and L1*G** create two patterns with the *1*G** patterm as template. The first matching pattern in the antenna is used as template. Also new additionalPattern can be added (e.g. for *5*G** ). With addExistingPatterns all already existing patterns that don\'t match completely to any of the above are added. Replaces parts of the descrption of antenna s. The star " * " left this part untouched. The antenna center variations (patterns) or offsets of all antenna s are set to zero. The antenna offset and antenna variations (patterns) are inseparable parts of the antenna model. With removeOffset an estimated offset is removed from all selected patterns and added to the offset. With removeMean an estimated constant is removed additionally as it cannot be seperated from signal biases. 
The mean and offset are defined as discretized ( deltaAzimuth , dZenith ) integral of the spherical cap from zenith down to maxZenith .', 'config_table': 'name string serial string radome string comment string pattern sequence type gnssType pattern matching of observation types offsetX double [m] antenna center offset offsetY double [m] antenna center offset offsetZ double [m] antenna center offset deltaAzimuth angle [degree] step size deltaZenith angle [degree] step size maxZenith angle [degree] values expression [m] expression (zenith, azimuth: variables) inputfileAntennaDefinition filename name string serial string radome string onlyFirstMatch boolean otherwise all machting antennas included inputfileStationInfo filename inputfileAntennaDefinition filename timeStart time only antennas used in this time interval timeEnd time only antennas used in this time interval specializeAntenna boolean e.g. separate different serial numbers from stationInfo antenna gnssAntennaDefintionListType deltaAzimuth angle [degree] step size, empty: no change deltaZenith angle [degree] step size, empty: no change maxZenith angle [degree], empty: no change antenna gnssAntennaDefintionListType patternTypes gnssType gnssType for each pattern (first match is used) additionalPattern sequence additional new patterns type gnssType pattern matching of observation types offsetX double [m] antenna center offset offsetY double [m] antenna center offset offsetZ double [m] antenna center offset deltaAzimuth angle [degree] step size deltaZenith angle [degree] step size maxZenith angle [degree] values expression [m] expression (zenith, azimuth: variables) addExistingPatterns boolean add existing patterns that don\'t match completely any of the above antenna gnssAntennaDefintionListType name string *: left this part untouched serial string *: left this part untouched radome string *: left this part untouched comment string *: left this part untouched antenna gnssAntennaDefintionListType patternTypes 
gnssType only matching patterns, default: all zeroOffset boolean zeroPattern boolean antenna gnssAntennaDefintionListType patternTypes gnssType only matching patterns, default: all removeMean boolean removeOffset boolean deltaAzimuth angle [degree] sampling of pattern to estimate center/constant deltaZenith angle [degree] sampling of pattern to estimate center/constant maxZenith angle [degree] sampling of pattern to estimate center/constant', 'display_text': 'Provides a list of GnssAntennaDefinitions as used in GnssAntennaDefinitionCreate.
New
Creates a new antenna.
FromFile
Select all or the first antenna from an antenna definition file which matches the wildcards.
FromStationInfo
Select all antennas from an antenna definition file which are used by a station within a defined time interval. With specializeAntenna an individual antenna is created for each different serial number using the general type specific values from file.
Resample
The azimuth and elevation dependent antenna center variations (patterns) of all antennas are resampled to a new resolution.
Transform
This class can be used to separate general antenna patterns for different gnssTypes. If the antennas contain only one pattern for all GPS observations on the L1 frequency (*1*G**), the patternTypes=C1*G** and L1*G** create two patterns with the *1*G** pattern as template. The first matching pattern in the antenna is used as template. Also new additionalPattern can be added (e.g. for *5*G**). With addExistingPatterns all already existing patterns that don\'t match completely to any of the above are added.
Rename
Replaces parts of the description of antennas. The star "*" leaves this part untouched.
SetZero
The antenna center variations (patterns) or offsets of all antennas are set to zero.
RemoveCenterMean
The antenna offset and antenna variations (patterns) are inseparable parts of the antenna model. With removeOffset an estimated offset is removed from all selected patterns and added to the offset. With removeMean an estimated constant is removed additionally as it cannot be separated from signal biases. The mean and offset are defined as discretized (deltaAzimuth, dZenith) integral of the spherical cap from zenith down to maxZenith.'},
'gnssParametrizationType': { 'name': 'gnssParametrizationType', 'key': 'gnssParametrizationType', 'description': 'This class defines the models and parameters of the linearized observation equations for all phase and code measurements (see GnssProcessing ) where the left side is the observation vector minus the effects computed from the a priori models. After each least squares adjustment (see ) the a priori parameters are updated The vector can be written with . Any outputfiles defined in the parametrizations are written with . Each parametrization (and possible constraint equations) has a name which enables activating/deactivating the estimation of subsets of with . The a priori model is unaffected and is always reduced. The model for the different observation types can be described as The notation describes the attribution to a signal type (i.e., C or L), frequency , signal attribute (e.g., C, W, Q, X), transmitting satellite , and observing receiver . It follows the , see GnssType . See also GnssProcessing . The influence of the ionosphere is modelled by a STEC parameter (slant total electron content) in terms of between each transmitter and receiver at each epoch. These parameters are pre-eliminated from the observation equations before accumulating the normal equations. This is similar to using the ionosphere-free linear combination as observations but only one STEC parameter is needed for an arbitrary number of observation types. The influence on the code and phase observation is modeled as The second order term depends on the and the direction of the signal . If further information about the ionosphere is available (in the form of a prior model or as additional parametrizations such as or ) the STEC parameters describe local and short–term scintillations. The STEC parameters are estimated as additions to the model and it is advised to constrain them towards zero with a standard deviation of sigmaSTEC . 
The influence of the ionosphere is modelled by a VTEC parameter (vertical total electron content) in terms of for every selected receiver at each epoch. Optionally, VTEC gradients in the North (x) and East (y) direction can be estimated via . The slant TEC is computed based on the VTEC and the optional North and East gradients and using the elevation-dependent Modified Single-Layer Model (MSLM) mapping function inserted into eq. , where is the azimuth angle and is the elevation angle. The result is written as a times series file at epochs with observations depending on . This class provides a simplified model of the ionosphere for single receivers and enables the separation of the TEC and signal biases, meaning becomes estimable. Local and short-term scintillations should be considered by adding loosely constrained . The parameter names are <station>:VTEC::<time> , <station>:VTECGradient.x:<temporal>:<interval> , <station>:VTECGradient.y:<temporal>:<interval> . Apriori VTEC maps can be removed from the observations with (e.g. from GnssIonex2GriddedDataTimeSeries ). The ionosphere is parametrized in terms of in a single layer sphere with radiusIonosphericLayer as a ly changing (e.g. hourly linear splines) spherical harmonics expansion up to maxDegree = 15 in a solar-geomagentic frame defined by . The VTEC values are mapped to STEC values in the observation equations via eq. . The estimated VTEC inclusive the apriori can be written to evaluated at and . Local and short-term scintillations should be considered by adding constrained . To account for signal biases add . The parameter names are VTEC:sphericalHarmonics.c_<degree>_<order>:<temporal>:<interval> , VTEC:sphericalHarmonics.s_<degree>_<order>:<temporal>:<interval> . Clock errors are estimated epoch-wise for each . No clock errors are estimated if no valid observations are available (e.g. data gaps in the observations). 
If all transmitters and receivers are selected by selectTransmitters and selectReceivers respectively, these parameters will be lineary dependent which would lead to a rank deficiency in the normal equation matrix. To circumvent this issue, the estimation requires an additional zero-mean constraint added in each epoch. This is realized with an additional observation equation summed over all with a standard deviation of sigmaZeroMeanConstraint . The parameter names are <station or prn>:clock::<time> . This parametrization is an alternative to . Clock errors are estimated epoch-wise for each and, opposed to , are also estimated for epochs that have no valid observations available (e.g. data gaps). The clock error of an epoch can be predicted by the clock error of the preceding epoch and an unknown clock drift This equation is applied as an additional constraint equation in each epoch The variance is estimated iteratively by variance component estimation (VCE). Clock jumps are treated as outliers and are automatically downweighted as described in . The absolute initial clock error and clock drift cannot be determined if all receiver and transmitter clocks are estimated together due to their linear dependency. This linear dependency would lead to a rank deficiency in the normal equation matrix in the same manner as described in . To circumvent this issue, an additional zero-mean constraint is added in each epoch as observation equation summed over all . This should be a loose constraint with a relatively large standard deviation of sigmaZeroMeanConstraint . The parameter names are <station or prn>:clock::<time> and <station or prn>:clockDrift:: . Each code and phase observation (e.g C1C or L2W ) contains a bias at transmitter/receiver level This class provides the apriori model of eq. only. The are read for each transmitter and receiver. 
Those file names are interpreted as a template with the variable {prn} or {station} being replaced by transmitter PRNs or receiver station names, respectively. (Infos regarding the variables {prn} and {station} can be found in and respectively). Those files can be converted with GnssSinexBias2SignalBias . The estimation of the biases is complex due to different linear dependencies, which result in rank deficiencies in the system of normal equations. For simplification the parametrization for has been split into: , , and (including phase biases). The file handling on the other hand still remains within this class. Any prior values for the transmitter/receiver biases are read with the respective inputfileSignalBiasTransmitter/Receiver . All biases for a transmitter/receiver are accumulated and written to the respective outputfileSignalBiasTransmitter/Receiver . Sets up an ambiguity parameter for each track and phase observation type. As the phase observations contain a float bias at transmitter/receiver level, not all ambiguities are resolvable to integer values. The number of resolvable ambiguities can be increased with known phase biases read from file via . In this case, should not be used for the corresponding transmitters and receivers. In case of GLONASS, the phase biases at receiver level differ between different frequency channels (frequency division multiple access, FDMA) and for each channel an extra float phase bias is estimated. With linearGlonassBias a linear relationship between bias and frequency channel is assumed, which reduces the number of float bias parameters and increases the number of resolvable integer ambiguities. The integer ambiguities can be resolved and fixed in . Resolved integer ambiguities are not estimated as unknown parameters in anymore and are removed from the system of normal equations. The estimated phase biases can be written to files in . 
The parameter names are <station>:phaseBias(<gnssType>):: , <prn>:phaseBias(<gnssType>):: , <station>.<prn>:ambiguity<index>of<count>(<GnssTypes>)::<track interval> . Each code observation (e.g C1C or C2W ) contains a bias at transmitter/receiver level The code biases cannot be estimated together with clock errors and ionospheric delays in an absolute sense as rank deficiencies will occur in the system of normal equations. Therefore, the biases are not initialized and set up as parameters directly but only estimable linear combinations are parametrized. The basic idea is to set up simplified normal equations with the biases, clock and STEC parameters of one single receiver or transmitter, eliminate clock and STEC parameters and perform an eigen value decomposition of the normal equation matrix Instead of estimating the original bias parameter a transformed set is introduced: The new parameters corresponding to eigen values are estimable, the others are left out (set to zero). The behavior can be controlled by explicitly setting up to two bias types with for each transmitter to zero. These then define the ionosphere-free clock datum of the transmitter. The missing linear combinations, which depend on the STEC parameters, can be added with . Additional rank deficiencies may also occur when biases of transmitters and receivers are estimated together. The minimum norm nullspace (also via eigen value decomposition) is formulated as zero constraint equations and added with a standard deviation of sigmaZeroMeanConstraint . In case of GLONASS the code biases at receiver level can differ between different frequency channels (frequency division multiple access, FDMA) and for each channel an extra code bias is estimated. With linearGlonassBias a linear relationship between bias and frequency channel is assumed, which reduces the number of bias parameters. The estimated biases can be written to files in . 
The parameter names are <station or prn>:codeBias0<index><combi of gnssTypes>:: . Each code observation (e.g C1C or C2W ) contains a bias at transmitter/receiver level This parametrization represents the linear combination of signal biases which completely depend on the STEC parameters. Ignoring these bias combinations would result in a biased STEC estimation (all other parameters are nearly unaffected). To determine this part of the signal biases the should be constrained. Furthermore, additional information about the ionosphere is required from or . Rank deficiencies due to the signal bias parameters may occur if biases of transmitters and receivers are estimated together. The minimum norm nullspace is formulated as zero constraint equations and added with a standard deviation of sigmaZeroMeanConstraint . The accumulated estimated result can be written to files in . The parameter names are <station or prn>:tecBias0<index><combi of gnssTypes>:: . This parametrization resolves the issue of some phase observations suffering from time-variable biases. Such a phenomenon has been found to affect GPS block IIF satellites on the L5 phase measurements (see Montenbruck et al. 2011, DOI: ). For these time-variable biases an appropriate temporal representation has to be defined in . For example, time-variable biases for GPS block IIF L5 phase observations ( = L5*G ) can be represented by a cubic spline with a nodal distance of one hour. The result is written as a times series file at the processing sampling or the sampling set by ). This parametrization should be set up in addition to the constant . Depending on the temporal representation a temporal zero-mean constraint is needed to separate this parametrization from the constant component. The constraint equations are added with a standard deviation of sigmaZeroMeanConstraint . The parameter names are <prn>:signalBias.<gnssType>:<temporal>:<interval> . Estimates a static position for all in the terrestrial frame. 
No-net constraints can be applied for a subset of stations, , with a standard deviation of noNetTranslationSigma and noNetRotationSigma and noNetScaleSigma . If the template is provided the constraints are applied relatively to these positions. Only stations with an existing position file are considered. Without the constraints are applied towards the apriori values from . As a single corrupted station position can disturb the no-net conditions, the rotation/translation parameters are estimated in a robust least squares adjustment beforehand. The computed weight matrix is used to downweight corrupted stations in the constraint equations. In case you want to align to an ITRF/IGS reference frame, precise coordinates can be generated with Sinex2StationPositions . The parameter names are <station>:position.x:: , <station>:position.y:: , <station>:position.z:: . Estimates the epoch-wise in an Earth-fixed frame (or in case of LEO satellites in an intertial frame). The epoch wise are computed within The parameter names are <station>:position.x::<time> , <station>:position.y::<time> , <station>:position.z::<time> . The estimation of (reduced) dynamic orbits is formulated as variational equations. It is based on calculated with PreprocessingVariationalEquation . Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree . The must include at least those parameters that were estimated in PreprocessingVariationalEquationOrbitFit . Additional parameters can be set up to reduce orbit mismodeling. If not enough epochs with observations are available ( minEstimableEpochsRatio ) the LEO satellite is disabled. 
The parameters and parameter names are divided into global <station>:<parametrizationAcceleration>:*:* , <station>:stochasticPulse.x::<time> , <station>:stochasticPulse.y::<time> , <station>:stochasticPulse.z::<time> , and arc related parameters <station>:arc<no>.<parametrizationAcceleration>:*:* , <station>:arc<no>.position0.x:: , <station>:arc<no>.position0.y:: , <station>:arc<no>.position0.z:: . <station>:arc<no>.velocity0.x:: , <station>:arc<no>.velocity0.y:: , <station>:arc<no>.velocity0.z:: . Same as but for transmitting GNSS satellites. For more details see orbit integration . A priori tropospheric correction is handled by a model (e.g. Vienna Mapping Functions 3). Additional parameters in for zenith wet delay and gradients can be set up via (usually 2-hourly linear splines) and (usually a daily trend). These parameters can be soft-constrained using to avoid an unsolvable system of normal equations in case of data gaps. The parameter names are <station>:troposphereWet:<temporal>:<interval> , <station>:troposphereGradient.x:<temporal>:<interval> , <station>:troposphereGradient.y:<temporal>:<interval> . Earth rotation parameters (ERPs) can be estimated by defining estimatePole ( , ) and estimateUT1 ( ). Estimating length of day (LOD) with the sign according to IGS conventions requires a negative value in . Constraints on the defined parameters can be added via . An example would be to set up so the parameter is included in the normal equation system . Since cannot be determined by GNSS, a hard constraint to its a priori value can then be added. The parameter names are earth:polarMotion.xp:<temporal>:<interval> , earth:polarMotion.yp:<temporal>:<interval> , earth:UT1:<temporal>:<interval> , earth:nutation.X:<temporal>:<interval> , earth:nutation.Y:<temporal>:<interval> . This class is for parametrization the antenna for their antenna center offsets (ACO) and antenna center variations (ACV) by . The receivers to be estimated can be selected by . 
The amount of patterns to be estimated is configurable with a list of . For each added a set of parameters will be evaluated. The observations will be assigned to the first that matches their own. E.g. having the patterns: ***G and L1* would lead to all GPS observations be assigned to the observation equations of the first pattern. The pattern type L1* would then consist of all other GNSS L1 phase observations. addNonMatchingTypes will, if activated, create automatically patterns for that are not selected within the list . Furthermore, it is possible to group same antenna build types from different receivers by groupAntennas . The grouping by same antenna build ignores antenna serial numbers. To estimate the antenna variation parameters, a longer period of observations might be necessary for accurate estimations. Hence one should use this parametrization by accumulating normal equations from several epochs. This can be accomplished as the last steps in the by adding to current selected parameters with and write the normal equation matrix with . The written normal equations can then be accumulated with NormalsAccumulate and solved by NormalsSolverVCE . Further, one should apply constraints to the normal equations by GnssAntennaNormalsConstraint since the estimation of ACO and ACV can lead to rank deficiencies in the normal equation matrix. Last the solved normal equation can be parsed to a antenna definition file with the program ParameterVector2GnssAntennaDefinition . As example referring to the cookbook GNSS satellite orbit determination and station network analysis , one could add additionally as parametrization. Since the estimations are done on a daily basis for each receiver we add an additional which disables parameter.receiverAntenna . After all stations are processed together with all parameters, one adds parameter.receiverAntenna with to the current selected parametrizations. 
The last is to write the daily normal equations including the parametrization into files. These normal equation files are then processed with the programs: NormalsAccumulate : accumulates normal equations. GnssAntennaNormalsConstraint : apply constraint to the normal equations. NormalsSolverVCE : solves the normal equations. ParameterVector2GnssAntennaDefinition : writes the solution into a antenna definition file Note that the apriori value for this parametrization is always zero and never updated according to eq. . The parameter names are <antennaName>:<antennaCenterVariations>.<gnssType>:: . Same as but for transmitting antennas (GNSS satellites). The parameter names are <antennaName>:<antennaCenterVariations>.<gnssType>:: . Add a pseudo observation equation (constraint) for each selected where is the bias and is the a priori value of the parameter if relativeToApriori is not set. The standard deviation sigma is used to weight the observation equations. Groups a set of parameters. This class can be used to structure complex parametrizations and has no further effect itself.', 'config_table': 'name string used for parameter selection apply2ndOrderCorrection boolean apply ionospheric correction apply3rdOrderCorrection boolean apply ionospheric correction applyBendingCorrection boolean apply ionospheric correction magnetosphere magnetosphereType nameConstraint string used for parameter selection sigmaSTEC expression expr. 
for sigma [TECU] for STEC constraint, variable E (elevation) available name string selectReceivers platformSelectorType outputfileVTEC filename variable {station} available, columns: MJD, VTEC, north gradient, east gradient mapR double constant of MSLM mapping function mapH double constant of MSLM mapping function mapAlpha double constant of MSLM mapping function gradient parametrizationTemporalType parametrization of north and east gradients name string selectReceivers platformSelectorType outputfileGriddedDataTimeSeries filename single layer VTEC [TECU] outputGrid gridType outputTimeSeries timeSeriesType inputfileGriddedDataTimeSeries filename single layer VTEC [TECU] maxDegree uint spherical harmonics parametrization temporal parametrizationTemporalType temporal evolution of VTEC values radiusIonosphericLayer double [m] radius of ionospheric single layer mapR double [m] constant of MSLM mapping function mapH double [m] constant of MSLM mapping function mapAlpha double constant of MSLM mapping function magnetosphere magnetosphereType name string used for parameter selection selectTransmitters platformSelectorType selectReceivers platformSelectorType outputfileClockTransmitter filename variable {prn} available outputfileClockReceiver filename variable {station} available nameConstraint string used for parameter selection selectTransmittersZeroMean platformSelectorType selectReceiversZeroMean platformSelectorType sigmaZeroMeanConstraint double (0 = unconstrained) sigma [m] for zero-mean constraint over all selected clocks name string used for parameter selection selectTransmitters platformSelectorType selectReceivers platformSelectorType outputfileClockTransmitter filename variable {prn} available outputfileClockReceiver filename variable {station} available huber double clock jumps > huber*sigma0 are downweighted huberPower double clock jumps > huber: sigma=(e/huber)^huberPower*sigma0 nameConstraint string used for parameter selection selectTransmittersZeroMean 
platformSelectorType use these transmitters for zero-mean constraint selectReceiversZeroMean platformSelectorType use these receivers for zero-mean constraint sigmaZeroMeanConstraint double (0 = unconstrained) sigma [m] for zero-mean constraint over all selected clocks name string used for parameter selection selectTransmitters platformSelectorType selectReceivers platformSelectorType outputfileSignalBiasTransmitter filename variable {prn} available outputfileSignalBiasReceiver filename variable {station} available inputfileSignalBiasTransmitter filename variable {prn} available inputfileSignalBiasReceiver filename variable {station} available name string used for parameter selection estimateTransmitterPhaseBias platformSelectorType estimateReceiverPhaseBias platformSelectorType linearGlonassBias boolean bias depends linear on frequency channel number name string used for parameter selection selectTransmitters platformSelectorType selectReceivers platformSelectorType linearGlonassBias boolean bias depends linear on frequency channel number typesClockDatum gnssType first two matching types define the ionosphere free transmitter clock (e.g. 
C1WG, C2WG) nameConstraint string used for parameter selection sigmaZeroMeanConstraint double (0 = unconstrained) sigma [m] for null space constraint name string used for parameter selection selectTransmitters platformSelectorType selectReceivers platformSelectorType linearGlonassBias boolean phase or code biases depend linear on frequency channel number nameConstraint string used for parameter selection sigmaZeroMeanConstraint double (0 = unconstrained) sigma [m] for null space constraint name string used for parameter selection selectTransmitters platformSelectorType outputfileBiasTimeSeries filename variable {prn} available inputfileBiasTimeSeries filename variable {prn} available type gnssType parametrizationTemporal parametrizationTemporalType nameConstraint string used for parameter selection sigmaZeroMeanConstraint double (0 = unconstrained) sigma [m] for temporal zero-mean constraint name string used for parameter selection selectReceivers platformSelectorType outputfileGriddedPosition filename delta north east up for all stations outputfilePosition filename variable {station} available, full estimated coordinates (in TRF) nameConstraint string used for parameter selection selectNoNetReceivers platformSelectorType inputfileNoNetPositions filename variable {station} available, precise coordinates used for no-net constraints (in TRF) noNetTranslationSigma double (0 = unconstrained) sigma [m] for no-net translation constraint on station coordinates noNetRotationSigma double (0 = unconstrained) sigma [m] at Earth\'s surface for no-net rotation constraint on station coordinates noNetScaleSigma double (0 = unconstrained) sigma [m] for no-net scale constraint on station coordinates huber double stations > huber*sigma0 are downweighted in no-net constraint huberPower double stations > huber: sigma=(e/huber)^huberPower*sigma0 name string used for parameter selection selectReceivers platformSelectorType outputfilePositions filename variable {station} available, 
estimated kinematic positions/orbit outputfileCovarianceEpoch filename variable {station} available, 3x3 epoch covariances name string used for parameter selection selectReceivers platformSelectorType outputfileOrbit filename variable {station} available outputfileParameters filename variable {station} available inputfileVariational filename variable {station} available stochasticPulse timeSeriesType [mu/s] parametrization of stochastic pulses parametrizationAcceleration parametrizationAccelerationType orbit force parameters ephemerides ephemeridesType minEstimableEpochsRatio double drop satellites with lower ratio of estimable epochs to total epochs integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint for orbit interpolation and velocity calculation name string used for parameter selection selectTransmitters platformSelectorType outputfileOrbit filename variable {prn} available outputfileParameters filename variable {prn} available inputfileVariational filename variable {prn} available stochasticPulse timeSeriesType [mu/s] parametrization of stochastic pulses parametrizationAcceleration parametrizationAccelerationType orbit force parameters ephemerides ephemeridesType minEstimableEpochsRatio double drop satellites with lower ratio of estimable epochs to total epochs integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint for orbit interpolation and velocity calculation name string used for parameter selection selectReceivers platformSelectorType outputfileTroposphere filename columns: MJD, ZHD, ZWD, dry north gradient, wet north gradient, dry east gradient, wet east gradient, ... 
troposphere troposphereType a priori troposphere model troposphereWetEstimation parametrizationTemporalType [m] parametrization of zenith wet delays troposphereGradientEstimation parametrizationTemporalType [degree] parametrization of north and east gradients name string used for parameter selection outputfileEOP filename EOP time series (mjd, xp, yp, sp, dUT1, LOD, X, Y, S) estimatePole parametrizationTemporalType xp, yp [mas] estimateUT1 parametrizationTemporalType rotation angle [ms] estimateNutation parametrizationTemporalType dX, dY [mas] name string used for parameter selection selectReceivers platformSelectorType antennaCenterVariations parametrizationGnssAntennaType estimate antenna center variations patternTypes gnssType gnssType for each pattern (first match is used) addNonMatchingTypes boolean add patterns for additional observed gnssTypes that don\'t match any of the above groupAntennas boolean common ACVs for same antenna build types (ignores antenna serial number) name string used for parameter selection selectTransmitters platformSelectorType antennaCenterVariations parametrizationGnssAntennaType estimate antenna center variations patternTypes gnssType gnssType for each pattern (first match is used) addNonMatchingTypes boolean add patterns for additional observed gnssTypes that don\'t match any of the above groupAntennas boolean common ACVs for same antenna build types (ignores antenna serial number) name string parameters parameterSelectorType parameter to constrain sigma double sigma of the constraint (same unit as parameter) bias double constrain all selected parameters towards this value relativeToApriori boolean constrain only dx and not full x=dx+x0 parametrization gnssParametrizationType', 'display_text': 'This class defines the models and parameters of the linearized observation equations for all phase and code measurements (see GnssProcessing) \\[\\label{gnssParametrizationType:model} \\M l - \\M f(\\M x_0) = \\left.\\frac{\\partial \\M 
f(\\M x)}{\\partial \\M x}\\right|_{\\M x_0} \\Delta\\M x + \\M\\epsilon, \\]where the left side is the observation vector minus the effects computed from the a priori models. After each least squares adjustment (see GnssProcessing:processingStep:estimate) the a priori parameters are updated \\[\\label{gnssParametrizationType:update} \\M x_0 := \\M x_0 + \\Delta\\hat{\\M x}. \\]The vector $\\M x_0$ can be written with GnssProcessing:processingStep:writeAprioriSolution. Any outputfiles defined in the parametrizations are written with GnssProcessing:processingStep:writeResults.
Each parametrization (and possible constraint equations) has a name which enables activating/deactivating the estimation of subsets of $\\Delta\\M x$ with GnssProcessing:processingStep:selectParametrizations. The a priori model $\\M f(\\M x_0)$ is unaffected and is always reduced.
The model for the different observation types can be described as \\[\\label{gnssParametrizationType:gnssFullModel} \\begin{split} f[\\tau\\nu a]_r^s(\\M x) &= \\text{geometry}(\\M r_r^s) + \\text{clock}^s(t) + \\text{clock}_r(t) \\\\ &+ \\text{ionosphere}([\\tau\\nu],t,\\M r_r^s) + \\text{troposphere}(t,\\M r_r^s) \\\\ &+ \\text{antenna}[\\tau\\nu a]^s + \\text{antenna}[\\tau\\nu a]_r \\\\ &+ \\text{bias}[\\tau\\nu a]^s + \\text{bias}[\\tau\\nu a]_r + \\lambda[L\\nu] N[L\\nu a]_r^s + \\text{other}(\\ldots) + \\epsilon[\\tau\\nu a]_r^s \\end{split} \\]The notation $[\\tau\\nu a]_r^s$ describes the attribution to a signal type $\\tau$ (i.e., C or L), frequency $\\nu$, signal attribute $a$ (e.g., C, W, Q, X), transmitting satellite $s$, and observing receiver $r$. It follows the RINEX 3 definition, see GnssType.
The influence of the ionosphere is modelled by a STEC parameter (slant total electron content) in terms of $[TECU]$ between each transmitter and receiver at each epoch. These parameters are pre-eliminated from the observation equations before accumulating the normal equations. This is similar to using the ionosphere-free linear combination as observations but only one STEC parameter is needed for an arbitrary number of observation types.
The influence on the code and phase observation is modeled as \\[\\label{gnssParametrizationType:IonosphereSTEC:STEC} \\begin{split} \\text{ionosphere}([C\\nu], STEC) &= \\frac{40.3}{f_{\\nu}^2}STEC + \\frac{7525\\M b^T\\M k}{f_{\\nu}^3}STEC + \\frac{r}{f_{\\nu}^4}STEC^2 \\\\ \\text{ionosphere}([L\\nu], STEC) &= -\\frac{40.3}{f_{\\nu}^2}STEC - \\frac{7525\\M b^T\\M k}{2f_{\\nu}^3}STEC - \\frac{r}{3f_{\\nu}^4}STEC^2 + \\text{bending}(E)STEC^2 \\end{split} \\]The second order term depends on the magnetosphere $\\M b$ and the direction of the signal $\\M k$.
If further information about the ionosphere is available (in the form of a prior model or as additional parametrizations such as parametrization:ionosphereMap or parametrization:ionosphereVTEC) the STEC parameters describe local and short–term scintillations. The STEC parameters are estimated as additions to the model and it is advised to constrain them towards zero with a standard deviation of sigmaSTEC.
IonosphereVTEC
The influence of the ionosphere is modelled by a VTEC parameter (vertical total electron content) in terms of $[TECU]$ for every selected receiver at each epoch. Optionally, VTEC gradients in the North (x) and East (y) direction can be estimated via gradient. The slant TEC is computed based on the VTEC and the optional North and East gradients $\\Delta V_x$ and $\\Delta V_y$ using the elevation-dependent Modified Single-Layer Model (MSLM) mapping function \\[\\label{gnssParametrizationType:IonosphereVTEC:STEC} STEC = \\frac{VTEC + \\cos(A) \\Delta V_x + \\sin(A) \\Delta V_y}{\\cos z\'} \\qquad\\text{with}\\qquad \\sin z\'= \\left(\\frac{R}{R+H}\\right)\\sin\\left(\\alpha(\\pi/2-E)\\right) \\]inserted into eq. \\eqref{gnssParametrizationType:IonosphereSTEC:STEC}, where $A$ is the azimuth angle and $E$ is the elevation angle.
This class provides a simplified model of the ionosphere for single receivers and enables the separation of the TEC and signal biases, meaning parametrization:tecBiases becomes estimable. Local and short-term scintillations should be considered by adding loosely constrained parametrization:ionosphereSTEC.
The ionosphere is parametrized in terms of $[TECU]$ in a single layer sphere with radiusIonosphericLayer as a temporally changing (e.g. hourly linear splines) spherical harmonics expansion \\[ VTEC(\\lambda,\\theta,t) = \\sum_{n=0}^{n_{max}} \\sum_{m=0}^n c_{nm}(t)C_{nm}(\\lambda,\\theta)+s_{nm}(t)S_{nm}(\\lambda,\\theta) \\]up to maxDegree=15 in a solar-geomagnetic frame defined by magnetosphere. The VTEC values are mapped to STEC values in the observation equations via eq. \\eqref{gnssParametrizationType:IonosphereVTEC:STEC}.
Clock errors are estimated epoch-wise for each selectTransmitters/Receivers. No clock errors are estimated if no valid observations are available (e.g. data gaps in the observations).
If all transmitters and receivers are selected by selectTransmitters and selectReceivers respectively, these parameters will be linearly dependent, which would lead to a rank deficiency in the normal equation matrix. To circumvent this issue, the estimation requires an additional zero-mean constraint added in each epoch. This is realized with an additional observation equation \\[ 0 = \\frac{1}{n_i + n_k} (\\sum_i \\Delta t^{s_i} + \\sum_k \\Delta t_{r_k}) \\]summed over all selectTransmitters/ReceiversZeroMean with a standard deviation of sigmaZeroMeanConstraint.
The clock error of an epoch can be predicted by the clock error of the preceding epoch and an unknown clock drift \\[ \\Delta t_{i+1} = \\Delta t_{i} + t_{drift} dt + \\epsilon_i. \\]This equation is applied as an additional constraint equation in each epoch \\[ 0 = \\Delta t_{i+1} - \\Delta t_{i} - t_{drift} dt + \\epsilon_i. \\]The variance $\\sigma^2(\\epsilon)$ is estimated iteratively by variance component estimation (VCE). Clock jumps are treated as outliers and are automatically downweighted as described in GnssProcessing:processingStep:estimate.
The absolute initial clock error and clock drift cannot be determined if all receiver and transmitter clocks are estimated together due to their linear dependency. This linear dependency would lead to a rank deficiency in the normal equation matrix in the same manner as described in parametrization:clocks. To circumvent this issue, an additional zero-mean constraint is added in each epoch as observation equation \\[ 0 = \\frac{1}{n_i + n_k} (\\sum_i \\Delta t^{s_i} + \\sum_k \\Delta t_{r_k}) \\]summed over all selectTransmitters/ReceiversZeroMean. This should be a loose constraint with a relatively large standard deviation of sigmaZeroMeanConstraint.
The parameter names are <station or prn>:clock::<time> and <station or prn>:clockDrift::.
SignalBiases
Each code and phase observation (e.g. C1C or L2W) contains a bias at transmitter/receiver level \\[ [\\tau\\nu a]_r^s(t) = \\dots + \\text{bias}[\\tau\\nu a]^s + \\text{bias}[\\tau\\nu a]_r + \\dots \\]This class provides the a priori model $\\M f(\\M x_0)$ of eq. \\eqref{gnssParametrizationType:model} only.
The estimation of the biases is complex due to different linear dependencies, which result in rank deficiencies in the system of normal equations. For simplification the parametrization for $\\Delta\\M x$ has been split into: parametrization:codeBiases, parametrization:tecBiases, and parametrization:ambiguities (including phase biases). The file handling on the other hand still remains within this class. Any prior values for the transmitter/receiver biases are read with the respective inputfileSignalBiasTransmitter/Receiver. All biases for a transmitter/receiver are accumulated and written to the respective outputfileSignalBiasTransmitter/Receiver.
Ambiguities
Sets up an ambiguity parameter for each track and phase observation type. \\[ [L\\nu a]_r^s(t) = \\dots + \\text{bias}[L\\nu a]^s + \\text{bias}[L\\nu a]_r + \\lambda[L\\nu] N[L\\nu a]_r^s \\]As the phase observations contain a float bias at transmitter/receiver level, not all ambiguities are resolvable to integer values. The number of resolvable ambiguities can be increased with known phase biases read from file via parametrization:signalBiases. In this case, estimateTransmitter/ReceiverPhaseBias should not be used for the corresponding transmitters and receivers.
In case of GLONASS, the phase biases at receiver level differ between different frequency channels (frequency division multiple access, FDMA) and for each channel an extra float phase bias is estimated. With linearGlonassBias a linear relationship between bias and frequency channel is assumed, which reduces the number of float bias parameters and increases the number of resolvable integer ambiguities.
Each code observation (e.g. C1C or C2W) contains a bias at transmitter/receiver level \\[ [C\\nu a]_r^s(t) = \\dots + \\text{bias}[C\\nu a]^s + \\text{bias}[C\\nu a]_r + \\dots \\]The code biases cannot be estimated together with clock errors and ionospheric delays in an absolute sense as rank deficiencies will occur in the system of normal equations. Therefore, the biases are not initialized and set up as parameters directly but only estimable linear combinations are parametrized.
The basic idea is to set up simplified normal equations with the biases, clock and STEC parameters of one single receiver or transmitter, eliminate clock and STEC parameters and perform an eigenvalue decomposition of the normal equation matrix \\[ \\M N = \\M Q \\M\\Lambda \\M Q^T. \\]Instead of estimating the original bias parameter $\\M x$ a transformed set $\\bar{\\M x}$ is introduced: \\[ \\bar{\\M x} = \\M Q^T \\M x. \\]The new parameters corresponding to eigenvalues $\\lambda>0$ are estimable, the others are left out (set to zero). The behavior can be controlled by explicitly setting up to two bias types with typesClockDatum for each transmitter to zero. These then define the ionosphere-free clock datum of the transmitter. The missing linear combinations, which depend on the STEC parameters, can be added with parametrization:tecBiases.
Additional rank deficiencies may also occur when biases of transmitters and receivers are estimated together. The minimum norm nullspace (also via eigenvalue decomposition) is formulated as zero constraint equations and added with a standard deviation of sigmaZeroMeanConstraint.
In case of GLONASS the code biases at receiver level can differ between different frequency channels (frequency division multiple access, FDMA) and for each channel an extra code bias is estimated. With linearGlonassBias a linear relationship between bias and frequency channel is assumed, which reduces the number of bias parameters.
The parameter names are <station or prn>:codeBias0<index><combi of gnssTypes>::.
TecBiases
Each code observation (e.g. C1C or C2W) contains a bias at transmitter/receiver level \\[ [C\\nu a]_r^s(t) = \\dots + \\text{bias}[C\\nu a]^s + \\text{bias}[C\\nu a]_r + \\ldots \\]This parametrization represents the linear combination of signal biases which completely depend on the STEC parameters. Ignoring these bias combinations would result in a biased STEC estimation (all other parameters are nearly unaffected). To determine this part of the signal biases the parametrization:ionosphereSTEC should be constrained. Furthermore, additional information about the ionosphere is required from parametrization:ionosphereVTEC or parametrization:ionosphereMap.
Rank deficiencies due to the signal bias parameters may occur if biases of transmitters and receivers are estimated together. The minimum norm nullspace is formulated as zero constraint equations and added with a standard deviation of sigmaZeroMeanConstraint.
The parameter names are <station or prn>:tecBias0<index><combi of gnssTypes>::.
TemporalBias
This parametrization resolves the issue of some phase observations suffering from time-variable biases. Such a phenomenon has been found to affect GPS block IIF satellites on the L5 phase measurements (see Montenbruck et al. 2011, DOI: 10.1007/s10291-011-0232-x).
For these time-variable biases an appropriate temporal representation has to be defined in parametrizationTemporal. For example, time-variable biases for GPS block IIF L5 phase observations (type=L5*G) can be represented by a cubic spline with a nodal distance of one hour.
This parametrization should be set up in addition to the constant parametrization:signalBiases. Depending on the temporal representation a temporal zero-mean constraint is needed to separate this parametrization from the constant component. The constraint equations are added with a standard deviation of sigmaZeroMeanConstraint.
The parameter names are <prn>:signalBias.<gnssType>:<temporal>:<interval>.
StaticPositions
Estimates a static position for all selectReceivers in the terrestrial frame.
No-net constraints can be applied for a subset of stations, selectNoNetReceivers, with a standard deviation of noNetTranslationSigma, noNetRotationSigma, and noNetScaleSigma. If the template inputfileNoNetPositions is provided the constraints are applied relative to these positions. Only stations with an existing position file are considered. Without inputfileNoNetPositions the constraints are applied towards the apriori values from GnssProcessing:receiver. As a single corrupted station position can disturb the no-net conditions, the rotation/translation parameters are estimated in a robust least squares adjustment beforehand. The computed weight matrix is used to downweight corrupted stations in the constraint equations.
In case you want to align to an ITRF/IGS reference frame, precise coordinates can be generated with Sinex2StationPositions.
The estimation of (reduced) dynamic orbits is formulated as variational equations. It is based on inputfileVariational calculated with PreprocessingVariationalEquation. Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree. The parametrizationAcceleration must include at least those parameters that were estimated in PreprocessingVariationalEquationOrbitFit. Additional stochasticPulse parameters can be set up to reduce orbit mismodeling. If not enough epochs with observations are available (minEstimableEpochsRatio) the LEO satellite is disabled.
A priori tropospheric correction is handled by a troposphere model (e.g. Vienna Mapping Functions 3). Additional parameters in $[m]$ for zenith wet delay and gradients can be set up via troposphereWetEstimation (usually 2-hourly linear splines) and troposphereGradientEstimation (usually a daily trend). These parameters can be soft-constrained using parametrization:constraints to avoid an unsolvable system of normal equations in case of data gaps.
Constraints on the defined parameters can be added via parametrization:constraints. An example would be to set up estimateUT1:constant so the $dUT1$ parameter is included in the normal equation system. Since $dUT1$ cannot be determined by GNSS, a hard constraint to its a priori value can then be added.
This class parametrizes the antennas for their antenna center offsets (ACO) and antenna center variations (ACV) via antennaCenterVariations. The receivers to be estimated can be selected by selectReceivers.
The number of patterns to be estimated is configurable with a list of patternTypes. For each added patternTypes a set of parameters will be evaluated. The observations will be assigned to the first patternTypes that matches their own. E.g. having the patterns: ***G and L1* would lead to all GPS observations being assigned to the observation equations of the first pattern. The pattern type L1* would then consist of all other GNSS L1 phase observations. addNonMatchingTypes will, if activated, automatically create patterns for observations that are not selected within the list patternTypes. Furthermore, it is possible to group same antenna build types from different receivers by groupAntennas. The grouping by same antenna build ignores antenna serial numbers.
Note that the apriori value $\\M x_0$ for this parametrization is always zero and never updated according to eq. \\eqref{gnssParametrizationType:update}.
The parameter names are <antennaName>:<antennaCenterVariations>.<gnssType>::.
TransmitterAntennas
Same as receiverAntennas but for transmitting antennas (GNSS satellites).
The parameter names are <antennaName>:<antennaCenterVariations>.<gnssType>::.
Constraints
Add a pseudo observation equation (constraint) for each selected parameter \\[ b-x_0 = 1 \\cdot dx + \\epsilon, \\]where $b$ is the bias and $x_0$ is the a priori value of the parameter if relativeToApriori is not set. The standard deviation sigma is used to weight the observation equations.
Group
Groups a set of parameters. This class can be used to structure complex parametrizations and has no further effect itself.'},
'gnssProcessingStepType': { 'name': 'gnssProcessingStepType', 'key': 'gnssProcessingStepType', 'description': 'Processing step in GnssProcessing . Processing steps enable a dynamic definition of the consecutive steps performed during any kind of GNSS processing. The most common steps are , which performs an iterative least squares adjustment, and , which writes all output files defined in GnssProcessing and is usually the last step. Some steps such as , , , and affect all subsequent steps. In case these steps are used within a or step, they only affect the steps within this level. For usage examples see cookbooks on GNSS satellite orbit determination and network analysis or Kinematic orbit determination of LEO satellites . Iterative non-linear least squares adjustment. In every iteration it accumulates the system of normal equations, solves the system and updates the estimated parameters. The estimated parameters serve as a priori values in the next iteration and the following processing steps. Iterates until either every single parameter update (converted to an influence in meters) is below a convergenceThreshold or maxIterationCount is reached. With computeResiduals the observation equations are computed again after each update to compute the observation residuals. The overall standard deviation of a single observation used for the weighting is composed of several factors where is the signal type, the azimuth and elevation dependent is given by and the other factors are estimated iteratively from the residuals. With computeWeights a standardized variance for each residual is computed taking the redundancy into account. If is above a threshold huber the observation gets a higher standard deviation used for weighting according to similar to robust least squares adjustment . With adjustSigma0 individual variance factors can be computed for each station and all phases of a system and each code observation type (e.g. 
for each L**G , L**E , C1CG , C2WG , C1CE , ) separately Performs a least squares adjustment like but with additional integer phase ambiguity resolution. After this step all resolved ambiguities are removed from the normal equation system. Only ambiguities involving are resolved. If is not set, all usable transmitters and/or receivers are selected for ambiguity resolution. Integer ambiguity resolution is performed based on the least squares ambiguity decorrelation adjustment (LAMBDA) method (Teunissen 1995, DOI ), specifically the modified algorithm (MLAMBDA) by Chang et al. (2005, DOI ). First the covariance matrix of the integer ambiguity parameters is computed by eliminating all but those parameters from the full normal equation matrix and inverting it. Then, a Z-transformation is performed as described by Chang et al. (2005) to decorrelate the ambiguity parameters without losing their integer nature. The search process follows MLAMBDA and uses integer minimization of the weighted sum of squared residuals. It is computationally infeasible to search a hyper-ellipsoid with a dimension of ten thousand or more. Instead, a blocked search algorithm is performed by moving a window with a length of, for example, searchBlockSize = 200 parameters over the decorrelated ambiguities, starting from the most accurate. In each step, the window is moved by half of its length and the overlapping parts are compared to each other. If all fixed ambiguities in the overlap agree, the algorithm continues. Otherwise, both windows are combined and the search is repeated using the combined window, again comparing with the overlapping part of the preceding window. If not all solutions could be checked for a block after maxSearchSteps , the selected incompleteAction is performed. If the algorithm reaches ambiguities with a standard deviation higher than sigmaMaxResolve , ambiguity resolution stops and the remaining ambiguities are left as float values. 
Otherwise, all ambiguity parameters are fixed to integer values. In contrast to an integer least squares solution over the full ambiguity vector, it is not guaranteed that the resulting solution is optimal in the sense of minimal variance with given covariance. This trade-off is necessary to cope with large numbers of ambiguities. Accumulates the normal equations and computes the covariance matrix as inverse of the normal matrix. It is not the full inverse but only the elements which are set in the normal matrix (see ) are computed. The matrix is passed to the . Only used in to get the epoch-wise covariance information at the moment. In this step all outputfiles defined in are written. It considers the settings of , , and . It is usually the last processing step, but can also be used at other points in the processing in combination with suffix to write intermediate results, for example before to output the float solution. Accumulates the normal equations matrix and writes it. If is set only the selected parameters are written to the normal equations and all other parameters are eliminated beforehand (implicitly solved). The solution of the normals would result in (see ). To write the appropriate apriori vector use . Writes the current apriori vector (see ). If is set only the selected parameters are written. Writes the observation residuals for all . For each station a file is written. The file name is interpreted as a template with the variable {station} being replaced by the station name. Writes a list of receivers (stations) which are used in the last step and selected by . Writes a list of transmitters which are used in the last step and selected by . Print residual statistics. 
areq: C1CG**: factor = 0.64, sigma0 = 1.00, count = 2748, outliers = 48 (1.75 ) areq: C1WG**: factor = 0.50, sigma0 = 1.00, count = 2748, outliers = 43 (1.56 ) areq: C2WG**: factor = 0.50, sigma0 = 1.00, count = 2748, outliers = 59 (2.15 ) areq: C5XG**: factor = 0.46, sigma0 = 1.00, count = 1279, outliers = 23 (1.80 ) areq: L1CG**: factor = 0.86, sigma0 = 0.96, count = 2748, outliers = 40 (1.46 ) areq: L1WG**: factor = 0.86, sigma0 = 1.02, count = 2748, outliers = 40 (1.46 ) areq: L2WG**: factor = 0.86, sigma0 = 0.96, count = 2748, outliers = 40 (1.46 ) areq: L5XG**: factor = 0.86, sigma0 = 1.30, count = 1279, outliers = 14 (1.09 ) areq: C1PR**: factor = 0.48, sigma0 = 1.00, count = 1713, outliers = 53 (3.09 ) areq: C2PR**: factor = 0.55, sigma0 = 1.00, count = 1713, outliers = 51 (2.98 ) areq: L1PR**: factor = 0.85, sigma0 = 1.09, count = 1713, outliers = 29 (1.69 ) areq: L2PR**: factor = 0.85, sigma0 = 0.88, count = 1713, outliers = 29 (1.69 ) areq: C1XE**: factor = 0.44, sigma0 = 1.00, count = 1264, outliers = 21 (1.66 ) areq: C5XE**: factor = 0.33, sigma0 = 1.00, count = 1264, outliers = 27 (2.14 ) areq: C7XE**: factor = 0.28, sigma0 = 1.00, count = 1264, outliers = 41 (3.24 ) areq: L1XE**: factor = 0.82, sigma0 = 1.14, count = 1264, outliers = 15 (1.19 ) areq: L5XE**: factor = 0.82, sigma0 = 0.84, count = 1264, outliers = 15 (1.19 ) areq: L7XE**: factor = 0.82, sigma0 = 0.94, count = 1264, outliers = 15 (1.19 ) badg: C1CG**: factor = 1.25, sigma0 = 1.00, count = 2564, outliers = 47 (1.83 ) ... Enable/disable parameter groups and constraint groups for subsequent steps, e.g. or . The name and nameConstraint of these groups are defined in . Prior models or previously estimated parameters used as new apriori values are unaffected and they are always reduced from the observations. This means all unselected parameters are kept fixed to their last result. 
An example would be to process at a 5-minute sampling using and then at the end to densify the clock parameters to the full 30-second observation sampling while keeping all other parameters fixed ( disable = * , enable = *.clock* , enable = parameter.STEC ). Select epochs for subsequent steps. This step can be used to reduce the processing sampling while keeping the original observation sampling for all preprocessing steps (e.g. outlier and cycle slip detection). Another example is to process at a 5-minute sampling by setting nthEpoch = 10 and then at the end to densify only the clock parameters to the full 30-second observation sampling by setting nthEpoch = 1 while keeping all other parameters fixed with . Select block structure of sparse normal equations for subsequent steps. This step can be used to define the structure of the different parts of the normal equation system, which can have a major impact on computing performance and memory consumption depending on the processing setup. The normal equation system is divided into three parts for epoch, interval, and ambiguity parameters. The epoch part is subdivided further into one subpart per epoch. Each part is divided into blocks and only non-zero blocks are stored in memory to reduce memory consumption and to prevent unnecessary matrix computations. defaultBlockSizeEpoch , defaultBlockSizeInterval , and defaultBlockSizeAmbiguity control the size of the blocks within each part of the normal equations. defaultBlockReceiverCount can be set to group a number of receivers into one block within the epoch and interval parts. If keepEpochNormalsInMemory = no epoch blocks are eliminated after they are set up to reduce the number of parameters in the normal equation system. defaultBlockCountReduction controls after how many epoch blocks an elimination step is performed. 
For larger processing setups or high sampling rates epoch block elimination is recommended as the large number of clock parameters require a lot of memory. This step can be used to process only a subset of stations in subsequent processing steps. The most common use is to start the processing with a well-distributed network of core stations as seen in GNSS satellite orbit determination and network analysis. To later process all other stations individually, use the processing step and select all stations excluding the core stations in that step. Perform these processing steps for each separately. All non-receiver-related parameters are disabled in these processing steps. This step can be used for individual precise point positioning (PPP) of all stations. During GNSS satellite orbit determination and network analysis this step is used after the initial processing of the core network to process all other stations individually. In that case provide the same station list as in this step that was used as in the step where the core network was selected. Perform these processing steps. This step can be used to structure complex processing flows. The steps that affect the following steps (those beginning with Select) only have an effect until the end of the group. Disable transmitter epochs during eclipse. 
With proper attitude modeling (see SimulateStarCameraGnss ) this is usually not necessary.', 'config_table': 'computeResiduals boolean adjustSigma0 boolean adjust sigma0 by scale factor (per receiver and type) computeWeights boolean downweight outliers huber double residuals > huber*sigma0 are downweighted huberPower double residuals > huber: sigma=(e/huber)^huberPower*sigma0 convergenceThreshold double [m] stop iteration once full convergence is reached maxIterationCount uint maximum number of iterations outputfileAmbiguities filename resolved ambiguities selectTransmitters platformSelectorType only resolve ambiguities with these participating transmitters selectReceivers platformSelectorType only resolve ambiguities with these participating receivers sigmaMaxResolve double max. allowed std. dev. of ambiguity to resolve [cycles] searchBlockSize uint block size for blocked integer search maxSearchSteps uint max. steps of integer search for each block incompleteAction choice if not all solutions tested after maxSearchSteps stop stop searching, ambiguities remain float in this block resolve use best integer solution found so far shrinkBlockSize try again with half block size throwException stop and throw an exception computeResiduals boolean adjustSigma0 boolean adjust sigma0 by scale factor (per receiver and type) computeWeights boolean downweight outliers huber double residuals > huber*sigma0 are downweighted huberPower double residuals > huber: sigma=(e/huber)^huberPower*sigma0 suffix string appended to every output file name (e.g. 
orbit.G01.suffix.dat) outputfileNormalEquations filename normals remainingParameters parameterSelectorType parameter order/selection of output normal equations constraintsOnly boolean write only normals of constraints without observations defaultNormalsBlockSize uint block size for distributing the normal equations, 0: one block, empty: original block size outputfileAprioriSolution filename a priori parameters outputfileParameterNames filename parameter names remainingParameters parameterSelectorType parameter order/selection of output normal equations selectReceivers platformSelectorType subset of used stations outputfileResiduals filename variable {station} available selectReceivers platformSelectorType subset of used stations outputfileUsedStationList filename ascii file with names of used stations selectTransmitters platformSelectorType subset of used transmitters outputfileUsedTransmitterList filename ascii file with PRNs parametrization choice enable sequence name string wildcards: * and ? disable sequence name string wildcards: * and ? 
nthEpoch uint use only every nth epoch in all subsequent processing steps defaultBlockSizeEpoch uint block size of epoch parameters, 0: one block defaultBlockSizeInterval uint block size of interval parameters, 0: one block defaultBlockSizeAmbiguity uint block size of ambiguity parameters, 0: one block defaultBlockReceiverCount uint number of receivers to group into one block for epoch and interval defaultBlockCountReduction uint minimum number of blocks for epoch reduction keepEpochNormalsInMemory boolean speeds up processing but uses much more memory accumulateEpochObservations boolean set up all observations per epoch and receiver at once selectReceivers platformSelectorType selectReceivers platformSelectorType variableReceiver string variable is set for each receiver processingStep gnssProcessingStepType steps are processed consecutively processingStep gnssProcessingStepType steps are processed consecutively selectTransmitters platformSelectorType disableShadowEpochs boolean disable epochs if satellite is in Earth\'s/Moon\'s shadow disablePostShadowRecoveryEpochs boolean disable epochs if satellite is in post-shadow recovery maneuver for GPS block IIA ephemerides ephemeridesType eclipse eclipseType eclipse model used to determine if a satellite is in Earth\'s shadow', 'display_text': 'Processing step in GnssProcessing.
Processing steps enable a dynamic definition of the consecutive steps performed during any kind of GNSS processing. The most common steps are estimate, which performs an iterative least squares adjustment, and writeResults, which writes all output files defined in GnssProcessing and is usually the last step. Some steps such as selectParametrizations, selectEpochs, selectNormalsBlockStructure, and selectReceivers affect all subsequent steps. In case these steps are used within a group or forEachReceiverSeparately step, they only affect the steps within this level.
Iterative non-linear least squares adjustment. In every iteration it accumulates the system of normal equations, solves the system and updates the estimated parameters. The estimated parameters serve as a priori values in the next iteration and the following processing steps. Iterates until either every single parameter update (converted to an influence in meters) is below a convergenceThreshold or maxIterationCount is reached.
With computeResiduals the observation equations are computed again after each update to compute the observation residuals.
The overall standard deviation of a single observation used for the weighting is composed of several factors \\[ \\hat{\\sigma}_i = \\hat{\\sigma}_i^{huber} \\hat{\\sigma}_{[\\tau\\nu a]}^{recv} \\sigma_{[\\tau\\nu a]}^{recv}(E,A), \\]where $[\\tau\\nu a]$ is the signal type, the azimuth and elevation dependent $\\sigma_{[\\tau\\nu a]}^{recv}(E,A)$ is given by receiver:inputfileAccuracyDefinition and the other factors are estimated iteratively from the residuals.
With computeWeights a standardized variance $\\hat{s}_i^2$ for each residual $\\hat{\\epsilon}_i$ is computed \\[ \\hat{s}_i^2 = \\frac{1}{\\hat{\\sigma}_{[\\tau\\nu a]}^{recv} \\sigma_{[\\tau\\nu a]}^{recv}(E,A)}\\frac{\\hat{\\epsilon}_i^2}{r_i} \\qquad\\text{with}\\qquad r_i = \\left(\\M A\\left(\\M A^T\\M A\\right)^{-1}\\M A^T\\right)_{ii} \\]taking the redundancy $r_i$ into account. If $\\hat{s}_i$ is above a threshold huber the observation gets a higher standard deviation used for weighting according to \\[ \\hat{\\sigma}_i^{huber} = \\left\\{ \\begin{array}{ll} 1 & s < huber,\\\\ (\\hat{s}_i/huber)^{huberPower} & s \\ge huber \\end{array} \\right., \\]similar to robust least squares adjustment.
With adjustSigma0 individual variance factors can be computed for each station and all phases of a system and each code observation type (e.g. for each L**G, L**E, C1CG, C2WG, C1CE, ...) separately \\[ \\hat{\\sigma}_{[\\tau\\nu a]}^{recv} = \\sqrt{\\frac{\\hat{\\M\\epsilon}^T\\M P\\hat{\\M\\epsilon}}{r}}. \\]
ResolveAmbiguities
Performs a least squares adjustment like processingStep:estimate but with additional integer phase ambiguity resolution. After this step all resolved ambiguities are removed from the normal equation system. Only ambiguities involving selectTransmitters/Receivers are resolved. If selectTransmitters/Receivers is not set, all usable transmitters and/or receivers are selected for ambiguity resolution.
Integer ambiguity resolution is performed based on the least squares ambiguity decorrelation adjustment (LAMBDA) method (Teunissen 1995, DOI 10.1007/BF00863419), specifically the modified algorithm (MLAMBDA) by Chang et al. (2005, DOI 10.1007/s00190-005-0004-x). First the covariance matrix of the integer ambiguity parameters is computed by eliminating all but those parameters from the full normal equation matrix and inverting it. Then, a Z-transformation is performed as described by Chang et al. (2005) to decorrelate the ambiguity parameters without losing their integer nature.
The search process follows MLAMBDA and uses integer minimization of the weighted sum of squared residuals. It is computationally infeasible to search a hyper-ellipsoid with a dimension of ten thousand or more. Instead, a blocked search algorithm is performed by moving a window with a length of, for example, searchBlockSize=200 parameters over the decorrelated ambiguities, starting from the most accurate. In each step, the window is moved by half of its length and the overlapping parts are compared to each other. If all fixed ambiguities in the overlap agree, the algorithm continues. Otherwise, both windows are combined and the search is repeated using the combined window, again comparing with the overlapping part of the preceding window. If not all solutions could be checked for a block after maxSearchSteps, the selected incompleteAction is performed. If the algorithm reaches ambiguities with a standard deviation higher than sigmaMaxResolve, ambiguity resolution stops and the remaining ambiguities are left as float values. Otherwise, all ambiguity parameters are fixed to integer values.
In contrast to an integer least squares solution over the full ambiguity vector, it is not guaranteed that the resulting solution is optimal in the sense of minimal variance with given covariance. This trade-off is necessary to cope with large numbers of ambiguities.
ComputeCovarianceMatrix
Accumulates the normal equations and computes the covariance matrix as inverse of the normal matrix. It is not the full inverse but only the elements which are set in the normal matrix (see gnssProcessingStep:selectNormalsBlockStructure) are computed. The matrix is passed to the parametrizations. Only used in parametrizations:kinematicPositions to get the epoch-wise covariance information at the moment.
It is usually the last processing step, but can also be used at other points in the processing in combination with suffix to write intermediate results, for example before gnssProcessingStep:resolveAmbiguities to output the float solution.
WriteNormalEquations
Accumulates the normal equations matrix and writes it. If remainingParameters is set only the selected parameters are written to the normal equations and all other parameters are eliminated beforehand (implicitly solved).
Writes the observation residuals for all selectReceivers. For each station a file is written. The file name is interpreted as a template with the variable {station} being replaced by the station name.
WriteUsedStationList
Writes a list of receivers (stations) which are used in the last step and selected by selectReceivers.
WriteUsedTransmitterList
Writes a list of transmitters which are used in the last step and selected by selectTransmitters.
Enable/disable parameter groups and constraint groups for subsequent steps, e.g. processingStep:estimate or processingStep:writeResults. The name and nameConstraint of these groups are defined in parametrizations. Prior models or previously estimated parameters used as new apriori $\\M x_0$ values are unaffected and they are always reduced from the observations. This means all unselected parameters are kept fixed to their last result.
An example would be to process at a 5-minute sampling using processingStep:selectEpochs and then at the end to densify the clock parameters to the full 30-second observation sampling while keeping all other parameters fixed (disable=*, enable=*.clock*, enable=parameter.STEC).
SelectEpochs
Select epochs for subsequent steps. This step can be used to reduce the processing sampling while keeping the original observation sampling for all preprocessing steps (e.g. outlier and cycle slip detection). Another example is to process at a 5-minute sampling by setting nthEpoch=10 and then at the end to densify only the clock parameters to the full 30-second observation sampling by setting nthEpoch=1 while keeping all other parameters fixed with processingStep:selectParametrizations.
SelectNormalsBlockStructure
Select block structure of sparse normal equations for subsequent steps.
This step can be used to define the structure of the different parts of the normal equation system, which can have a major impact on computing performance and memory consumption depending on the processing setup.
The normal equation system is divided into three parts for epoch, interval, and ambiguity parameters. The epoch part is subdivided further into one subpart per epoch. Each part is divided into blocks and only non-zero blocks are stored in memory to reduce memory consumption and to prevent unnecessary matrix computations. defaultBlockSizeEpoch, defaultBlockSizeInterval, and defaultBlockSizeAmbiguity control the size of the blocks within each part of the normal equations. defaultBlockReceiverCount can be set to group a number of receivers into one block within the epoch and interval parts.
If keepEpochNormalsInMemory=no epoch blocks are eliminated after they are set up to reduce the number of parameters in the normal equation system. defaultBlockCountReduction controls after how many epoch blocks an elimination step is performed. For larger processing setups or high sampling rates epoch block elimination is recommended as the large number of clock parameters require a lot of memory.
SelectReceivers
This step can be used to process only a subset of stations in subsequent processing steps. The most common use is to start the processing with a well-distributed network of core stations as seen in GNSS satellite orbit determination and network analysis. To later process all other stations individually, use the processing step processingStep:forEachReceiverSeparately and select all stations excluding the core stations in that step.
ForEachReceiverSeparately
Perform these processing steps for each selectReceivers separately. All non-receiver-related parameters are disabled in these processing steps.
Perform these processing steps. This step can be used to structure complex processing flows. The processingSteps that affect the following steps (those beginning with Select) only have an effect until the end of the group.
DisableTransmitterShadowEpochs
Disable transmitter epochs during eclipse. With proper attitude modeling (see SimulateStarCameraGnss) this is usually not necessary.'},
'gnssReceiverGeneratorType': { 'name': 'gnssReceiverGeneratorType', 'key': 'gnssReceiverGeneratorType', 'description': 'Definition and basic information of GNSS receivers. Most of the input files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops (marked with * below). These files are regularly updated. * : Antenna and receiver information, antenna reference point offsets, antenna orientations. Created via GnssStationLog2Platform or PlatformCreate . * : Antenna center offsets and variations. Created via GnssAntex2AntennaDefinition or GnssAntennaDefinitionCreate . : Observed signal types (optional). Created via GnssReceiverDefinitionCreate in case you want to define which signal types a receiver model can observe. * : Elevation and azimuth dependent accuracy. Created via GnssAntennaDefinitionCreate . : Converted from RINEX observation files via RinexObservation2GnssReceiver . It is possible to limit the observation types to be used in the processing by a list of and any observation types not defined within the list are ignored and discarded. Similarly observations defined in the list of are ignored and discarded. The codes used follow the . Each receiver goes through a preprocessing step individually, where observation outliers are removed or downweighted, continuous tracks of phase observations are defined for ambiguity parametrization, cycle slips are detected, and receivers are disabled if they do not fulfill certain requirements. The preprocessing step consists of an initial PPP estimation done by robust least squares adjustment and checks whether the position error of the solutions exceeds codeMaxPositionDiff . If the error exceeds the threshold the receiver will be discarded. The preprocessing also sets initial clock error values and removes tracks that stay below a certain elevation mask ( elevationTrackMinimum ). See also GnssProcessing and GnssSimulateReceiver . A network of GNSS ground stations is defined via . 
Each line can contain more than one station. The first station in each line for which exists and contains enough observations is used for the processing. All input files except , , and are read for each station. The file name is interpreted as a template with the variable {station} being replaced by the station name. The effects of loading and tidal deformation on station positions can be corrected for via and , respectively. Tidal deformations typically include: : Earth tidal deformations (IERS conventions) : ocean tidal deformations (e.g. fes2014b n720, minDegree = 1 ) : atmospheric tidal deformation (e.g. AOD1B RL06, minDegree = 1 ) : pole tidal deformations (IERS conventions) : ocean pole tidal deformations (IERS conventions) A single low-Earth orbiting (LEO) satellite with an onboard GNSS receiver. An apriori orbit is needed as . Attitude data must be provided via . If no attitude data is available from the satellite operator, the star camera data can be simulated by using SimulateStarCamera .', 'config_table': 'inputfileStationList filename ascii file with station names maxStationCount uint maximum number of stations to be used inputfileStationInfo filename variable {station} available. station metadata (antennas, receivers, ...) inputfileAntennaDefinition filename antenna center offsets and variations noAntennaPatternFound choice what should happen if no antenna pattern is found for an observation ignoreObservation ignore observation if no matching pattern is found useNearestFrequency use pattern of nearest frequency if no matching pattern is found throwException throw exception if no matching pattern is found inputfileReceiverDefinition filename observed signal types inputfileAccuracyDefinition filename elevation and azimuth dependent accuracy inputfileStationPosition filename variable {station} available. 
disableStationWithoutPosition boolean drop stations without apriori position inputfileClock filename variable {station} available inputfileObservations filename variable {station} available loadingDisplacement gravityfieldType loading deformation tidalDisplacement tidesType tidal deformation ephemerides ephemeridesType for tidal deformation inputfileDeformationLoadLoveNumber filename inputfilePotentialLoadLoveNumber filename if full potential is given and not only loading potential useType gnssType only use observations that match any of these patterns ignoreType gnssType ignore observations that match any of these patterns elevationCutOff angle [degree] ignore observations below cutoff elevationTrackMinimum angle [degree] ignore tracks that never exceed minimum elevation minObsCountPerTrack uint tracks with less number of epochs with observations are dropped minEstimableEpochsRatio double [0,1] drop stations with lower ratio of estimable epochs to total epochs preprocessing sequence settings for preprocessing of observations/stations printStatistics boolean print preprocesssing statistics for all receivers huber double residuals > huber*sigma0 are downweighted huberPower double residuals > huber: sigma=(e/huber)^huberPower*sigma0 codeMaxPositionDiff double [m] max. 
allowed position error by PPP code only clock error estimation denoisingLambda double regularization parameter for total variation denoising used in cylce slip detection tecWindowSize uint (0 = disabled) window size for TEC smoothness evaluation used in cycle slip detection tecSigmaFactor double factor applied to moving standard deviation used as threshold in TEC smoothness evaluation during cycle slip detection outputfileTrackBefore filename variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track before cycle slip detection outputfileTrackAfter filename variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track after cycle slip detection inputfileStationInfo filename satellite metadata (antenna, receiver, ...) inputfileAntennaDefinition filename antenna center offsets and variations noAntennaPatternFound choice what should happen if no antenna pattern is found for an observation ignoreObservation ignore observation if no matching pattern is found useNearestFrequency use pattern of nearest frequency if no matching pattern is found throwException throw exception if no matching pattern is found inputfileReceiverDefinition filename observed signal types inputfileAccuracyDefinition filename elevation and azimut dependent accuracy inputfileObservations filename inputfileOrbit filename approximate positions inputfileStarCamera filename satellite attitude sigmaFactorPhase expression PHASE: factor = f(FREQ, ELE, SNR, ROTI, dTEc, IONOINDEX) sigmaFactorCode expression CODE: factor = f(FREQ, ELE, SNR, ROTI, dTEc, IONOINDEX) supportsIntegerAmbiguities boolean receiver tracks full cycle integer ambiguities wavelengthFactor double factor to account for half-wavelength observations (collected by codeless squaring techniques) useType gnssType only use observations that match any of these patterns ignoreType gnssType ignore observations that match 
any of these patterns elevationCutOff angle [degree] ignore observations below cutoff minObsCountPerTrack uint tracks with less number of epochs with observations are dropped preprocessing sequence settings for preprocessing of observations/stations printStatistics boolean print preprocesssing statistics for all receivers huber double residuals > huber*sigma0 are downweighted huberPower double residuals > huber: sigma=(e/huber)^huberPower*sigma0 codeMaxPositionDiff double [m] max. allowed position error by PPP code only clock error estimation denoisingLambda double regularization parameter for total variation denoising used in cylce slip detection tecWindowSize uint (0 = disabled) window size for TEC smoothness evaluation used in cycle slip detection tecSigmaFactor double factor applied to moving standard deviation used as threshold in TEC smoothness evaluation during cycle slip detection outputfileTrackBefore filename variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track before cycle slip detection outputfileTrackAfter filename variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track after cycle slip detection', 'display_text': 'Definition and basic information of GNSS receivers.
Most of the input files are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops (marked with * below). These files are regularly updated.
It is possible to limit the observation types to be used in the processing by a list of useType and any observation types not defined within the list are ignored and discarded. Similarly observations defined in the list of ignoreType are ignored and discarded. The codes used follow the RINEX 3 definition.
Each receiver goes through a preprocessing step individually, where observation outliers are removed or downweighted, continuous tracks of phase observations are defined for ambiguity parametrization, cycle slips are detected, and receivers are disabled if they do not fulfill certain requirements. The preprocessing step consists of an initial PPP estimation done by robust least squares adjustment and checks whether the position error of the solutions exceeds codeMaxPositionDiff. If the error exceeds the threshold the receiver will be discarded. The preprocessing also sets initial clock error values and removes tracks that stay below a certain elevation mask (elevationTrackMinimum).
A network of GNSS ground stations is defined via inputfileStationList. Each line can contain more than one station. The first station in each line for which inputfileObservations exists and contains enough observations is used for the processing. All input files except inputfileAntennaDefinition, inputfileReceiverDefinition, and inputfileAccuracyDefinition are read for each station. The file name is interpreted as a template with the variable {station} being replaced by the station name.
The effects of loading and tidal deformation on station positions can be corrected for via loadingDisplacement and tidalDisplacement, respectively. Tidal deformations typically include:
poleTide: pole tidal deformations (IERS conventions)
poleOceanTide: ocean pole tidal deformations (IERS conventions)
LowEarthOrbiter
A single low-Earth orbiting (LEO) satellite with an onboard GNSS receiver. An apriori orbit is needed as inputfileOrbit. Attitude data must be provided via inputfileStarCamera. If no attitude data is available from the satellite operator, the star camera data can be simulated by using SimulateStarCamera.'},
'gnssTransmitterGeneratorType': { 'name': 'gnssTransmitterGeneratorType', 'key': 'gnssTransmitterGeneratorType', 'description': 'Definition and basic information of GNSS transmitters. See also GnssProcessing and GnssSimulateReceiver . A list of satellite PRNs (i.e for GPS: G01, G02, G03, ...) must be provided via . Satellite system codes follow the , see GnssType . All input files except , and are read for each satellite. The file name is interpreted as a template with the variable {prn} being replaced by the satellite PRN. Metadata input files (marked with * below) are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops . These files are regularly updated. * : PRN-SVN mapping, antenna offsets and orientations. Created via GnssAntex2AntennaDefinition or PlatformCreate . * : Antenna center variations. Created via GnssAntex2AntennaDefinition or GnssAntennaDefinitionCreate . * : Transmitted signal types. Created via GnssReceiverDefinitionCreate in case you want to define which signal types a satellite transmits. * : Scale factor of transmitted signals due to frequency offset/clock drift. Can be dreived from broadcast clocks drifts. : Converted via Sp3Format2Orbit or output of GnssProcessing . : Rotation from body frame to CRF. Created via SimulateStarCameraGnss or converted via GnssOrbex2StarCamera . 
: Converted via GnssClockRinex2InstrumentClock or GnssRinexNavigation2OrbitClock or output of GnssProcessing .', 'config_table': 'inputfileTransmitterList filename ascii file with transmitter PRNs, used to loop variable {prn} inputfileTransmitterInfo filename variable {prn} available inputfileAntennaDefintion filename phase centers and variations (ANTEX like) noAntennaPatternFound choice what should happen is no antenna pattern is found for an observation ignoreObservation ignore observation if no matching pattern is found useNearestFrequency use pattern of nearest frequency if no matching pattern is found throwException throw exception if no matching pattern is found inputfileSignalDefintion filename transmitted signal types inputfileClockFrequencyScale filename variable {prn} available inputfileOrbit filename variable {prn} available inputfileAttitude filename variable {prn} available inputfileClock filename variable {prn} available interpolateClock boolean linear interpolation of missing epochs interpolationDegree uint for orbit interpolation and velocity calculation', 'display_text': 'Definition and basic information of GNSS transmitters.
Metadata input files (marked with * below) are provided in GROOPS file formats at https://ftp.tugraz.at/pub/ITSG/groops. These files are regularly updated.
'},
'gnssType': { 'name': 'gnssType', 'key': 'gnssType', 'description': 'A GnssType string consists of six parts (type, frequency, attribute, system, PRN, frequency number) represented by seven characters. The first three characters (representing type, frequency, and attribute) correspond to the observation codes of the . The satellite system character also follows the RINEX 3 definition: G = GPS R = GLONASS E = Galileo C = BeiDou S = SBAS J = QZSS I = IRNSS PRN is a two-digit number identifying a satellite. Frequency number is only used for GLONASS, where the range -7 to 14 is represented by letters starting with A. Each part of a GnssType string can be replaced by a wildcard \' * \', enabling the use of these strings as patterns, for example to select a subset of observations (e.g. C**G** matches all GPS code/range observations). Trailing wildcards are optional, meaning L1*R is automatically expanded to L1*R*** . For some RINEX 2 types (e.g. Galileo L5) the RINEX 3 attribute is unknown/undefined and can be replaced by ? , for example L5?E01 . Examples: C1CG23 = code/range observation, L1 frequency, derived from C/A code, GPS, PRN 23 L2PR05B = phase observation, G2 frequency, derived from P code, GLONASS, PRN 05, frequency number -6 *5*E** = all observation types, E5a frequency, all attributes, Galileo, all PRNs', 'config_table': '', 'display_text': 'A GnssType string consists of six parts (type, frequency, attribute, system, PRN, frequency number) represented by seven characters.
The first three characters (representing type, frequency, and attribute) correspond to the observation codes of the RINEX 3 definition.
The satellite system character also follows the RINEX 3 definition:
G = GPS
R = GLONASS
E = Galileo
C = BeiDou
S = SBAS
J = QZSS
I = IRNSS
PRN is a two-digit number identifying a satellite.
Frequency number is only used for GLONASS, where the range -7 to 14 is represented by letters starting with A.
Each part of a GnssType string can be replaced by a wildcard \'*\', enabling the use of these strings as patterns, for example to select a subset of observations (e.g. C**G** matches all GPS code/range observations). Trailing wildcards are optional, meaning L1*R is automatically expanded to L1*R***. For some RINEX 2 types (e.g. Galileo L5) the RINEX 3 attribute is unknown/undefined and can be replaced by ?, for example L5?E01.
L2PR05B = phase observation, G2 frequency, derived from P code, GLONASS, PRN 05, frequency number -6
*5*E** = all observation types, E5a frequency, all attributes, Galileo, all PRNs
'},
'gravityfieldType': { 'name': 'gravityfieldType', 'key': 'gravityfieldType', 'description': 'This class computes functionals of the time depending gravity field, e.g potential, gravity anomalies or gravity gradients. If several instances of the class are given the results are summed up. Before summation every single result is multiplicated by a factor . To subtract a normal field like GRS80 from a potential to get the disturbance potential you must choose one factor by 1 and the other by -1. To get the mean of two fields just set each factor to 0.5. Some of the instances gives also information about the accuracy. The variance of the result (sum) is computed by means of variance propagation. Reads coefficients of a spherical harmonics expansion from file. The potential is given by If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The computed result is multiplied with factor . If setSigmasToZero is true the variances are set to zero. This option is only important for variance propagation and does not change the result of the gravity field functionals. Reads coefficients of a spherical harmonics expansion (for inner space) from file. If set the expansion is limited in the range between minDegree and maxDegree inclusivly. The computed result is multiplied with factor . If setSigmasToZero is true the variances are set to zero. This option is only important for error propagation and does not change the result of the gravity field functionals. Reads a solution vector from file which may be computed by a least squares adjustment (e.g. by NormalsSolverVCE ). The coefficients of the vector are interpreted from position indexStart (counting from zero) with help of . If the solution file contains solution of several right hand sides you can choose one with number rightSide (counting from zero). You can also read a vector from file containing the accuracies of the coefficients. The computed result is multiplied with factor . 
Read a time variable gravity field from file represented by a spherical harmonics expansion in the spatial domain and spline functions in the time domain. If set the expansion is limited in the range between minDegree and maxDegree inclusivly. This file can be created for example by Gravityfield2TimeSplines or PotentialCoefficients2BlockMeanTimeSplines . The computed result is multiplied with factor . The given is interpreted as trend function and the result is computed at time as follows with is timeStart and is timeStep . The given is interpreted as oscillation function and the result is computed at time as follows with . A is only evaluated in the interval between timeStart inclusively and timeEnd exclusively. Outside the interval the result is zero. This class is useful to get a time series of monthly mean GRACE gravity field solutions. In each month another file of potentialCoefficients is valid. This can easily be created with . Treat as gravitational forces. The tides need a realization of to transform between the CRF and TRF and to compute rotational deformation from polar motion. It also needs from Sun, moon, and planets. The gravity is integrated from a topographic mass distribution. For each grid point in a prisma with density is assumed. The horizontal extension is computed from the grid spacing and the vertical extension is given by radialLowerBound and radialUpperBound above ellipsoid. All values are expressions and computed for each point with given data in the grid file. The standard variables for grids are available, see dataVariables . Example: The grid file contains the orthometric height of the topography in the first column, the geoid height in the second and the mean density of each prism in the third column. In this case the following settings should be used: radialUpperBound = data0+data1 , radialLowerBound = data1 , density = data2 . 
As the prim computation is time consuming a maximum distance around the evaluation point can defined with distancePrism . Afterwards a simplified radial line (the prism mass is concentrated to a line in the center) is used up to a distance of distanceLine . At last the prim is approximated by a point mass in the center up to a distance distanceMax (if set). Prisms nearby the evaluation point can be excluded with distanceMin . The given is interpreted as an oscillation function in the gravitational potential field, caused by large earthquakes. The result is computed at time as follows: with . In this equation, is the attenuation factor, is the overtone factor, is degree, is order, and is time in second. and are computed with the elastic Earth model or observed from the long period record of superconducting gravimeter measurements after the earthquakes. Convert to spherical harmonics and the coefficients. Groups a set of and has no further effect itself.', 'config_table': 'inputfilePotentialCoefficients filename minDegree uint maxDegree uint factor double the result is multiplied by this factor, set -1 to subtract the field setSigmasToZero boolean set variances to zero, should be used by adding back reference fields inputfilePotentialCoefficients filename minDegree uint maxDegree uint factor double the result is multiplied by this factor, set -1 to subtract the field setSigmasToZero boolean set variances to zero, should be used by adding back reference fields parametrization parametrizationGravityType inputfileSolution filename solution vector inputfileSigmax filename standards deviations or covariance matrix of the solution indexStart uint position in the solution vector rightSide uint if solution contains several right hand sides, select one factor double the result is multiplied by this factor, set -1 to subtract the field inputfileTimeSplinesGravityfield filename inputfileTimeSplinesCovariance filename minDegree uint maxDegree uint factor double the result is 
multiplied by this factor, set -1 to subtract the field gravityfield gravityfieldType this field is multiplicated by (time-time0)/timeStep timeStart time reference time timeStep time gravityfieldCos gravityfieldType multiplicated by cos(2pi/T(time-time0)) gravityfieldSin gravityfieldType multiplicated by sin(2pi/T(time-time0)) time0 time reference time period time [day] gravityfield gravityfieldType timeStart time first point in time timeEnd time last point in time will be less or equal timeEnd tides tidesType earthRotation earthRotationType ephemerides ephemeridesType inputfileGridRectangular filename Digital Terrain Model density expression expression [kg/m**3] radialUpperBound expression expression (variables \'height\', \'data\', \'L\', \'B\' and, \'area\' are taken from the gridded data radialLowerBound expression expression (variables \'height\', \'data\', \'L\', \'B\' and, \'area\' are taken from the gridded data distanceMin double [km] min. influence distance (ignore near zone) distancePrism double [km] max. distance for prism formular distanceLine double [km] max. distance for radial integration distanceMax double [km] max. influence distance (ignore far zone) factor double the result is multiplied by this factor, set -1 to subtract the field inputCoefficientMatrix filename oscillation model parameters time0 time the time earthquake happened minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius gravityfield gravityfieldType filter sphericalHarmonicsFilterType gravityfield gravityfieldType factor double the result is multiplied by this factor, set -1 to subtract the field', 'display_text': 'This class computes functionals of the time depending gravity field, e.g potential, gravity anomalies or gravity gradients.
If several instances of the class are given the results are summed up. Before summation every single result is multiplied by a factor. To subtract a normal field like GRS80 from a potential to get the disturbance potential you must set one factor to 1 and the other to -1. To get the mean of two fields just set each factor to 0.5.
Some of the instances also give information about the accuracy. The variance of the result (sum) is computed by means of variance propagation.
PotentialCoefficients
Reads coefficients of a spherical harmonics expansion from file. The potential is given by \\[ V(\\lambda,\\vartheta,r) = \\frac{GM}{R}\\sum_{n=0}^\\infty \\sum_{m=0}^n \\left(\\frac{R}{r}\\right)^{n+1} \\left(c_{nm} C_{nm}(\\lambda,\\vartheta) + s_{nm} S_{nm}(\\lambda,\\vartheta)\\right). \\]If set the expansion is limited in the range between minDegree and maxDegree inclusively. The computed result is multiplied with factor. If setSigmasToZero is true the variances are set to zero. This option is only important for variance propagation and does not change the result of the gravity field functionals.
PotentialCoefficientsInterior
Reads coefficients of a spherical harmonics expansion (for inner space) from file. If set the expansion is limited in the range between minDegree and maxDegree inclusively. The computed result is multiplied with factor. If setSigmasToZero is true the variances are set to zero. This option is only important for error propagation and does not change the result of the gravity field functionals.
FromParametrization
Reads a solution vector from file inputfileSolution which may be computed by a least squares adjustment (e.g. by NormalsSolverVCE). The coefficients of the vector are interpreted from position indexStart (counting from zero) with help of parametrizationGravity. If the solution file contains solution of several right hand sides you can choose one with number rightSide (counting from zero). You can also read a vector from file inputfileSigmax containing the accuracies of the coefficients.
The computed result is multiplied with factor.
TimeSplines
Read a time-variable gravity field from file inputfileTimeSplinesGravityfield represented by a spherical harmonics expansion in the spatial domain and spline functions in the time domain. If set the expansion is limited in the range between minDegree and maxDegree inclusively.
The given gravityfield is interpreted as trend function and the result is computed at time $t$ as follows \\[ V(\\M x,t) = \\frac{t-t_0}{\\Delta t}V(\\M x), \\]where $t_0$ is timeStart and $\\Delta t$ is timeStep.
Oscillation
The given gravityfield is interpreted as oscillation function and the result is computed at time $t$ as follows \\[ V(\\M x,t) = \\cos(\\omega)V_{cos}(\\M x)+\\sin(\\omega)V_{sin}(\\M x), \\]with $\\omega=\\frac{2\\pi}{T}(t-t_0)$.
InInterval
A gravityfield is only evaluated in the interval between timeStart inclusively and timeEnd exclusively. Outside the interval the result is zero.
This class is useful to get a time series of monthly mean GRACE gravity field solutions. In each month another file of potentialCoefficients is valid. This can easily be created with loop.
Tides
Treat tides as gravitational forces. The tides need a realization of earthRotation to transform between the CRF and TRF and to compute rotational deformation from polar motion. It also needs ephemerides from Sun, moon, and planets.
Topography
The gravity is integrated from a topographic mass distribution. For each grid point in inputfileGridRectangular a prism with density is assumed. The horizontal extension is computed from the grid spacing and the vertical extension is given by radialLowerBound and radialUpperBound above the ellipsoid. All values are expressions and computed for each point with given data in the grid file. The standard variables for grids are available, see dataVariables.
Example: The grid file contains the orthometric height of the topography in the first column, the geoid height in the second and the mean density of each prism in the third column. In this case the following settings should be used:
radialUpperBound = data0+data1,
radialLowerBound = data1,
density = data2.
As the prism computation is time consuming a maximum distance around the evaluation point can be defined with distancePrism. Afterwards a simplified radial line (the prism mass is concentrated to a line in the center) is used up to a distance of distanceLine. At last the prism is approximated by a point mass in the center up to a distance distanceMax (if set). Prisms nearby the evaluation point can be excluded with distanceMin.
EarthquakeOscillation
The given gravityfield is interpreted as an oscillation function in the gravitational potential field, caused by large earthquakes. The result is computed at time $t$ as follows: \\[ C_{lm}(\\M t) = \\sum_{n=0}^NC_{nlm}(1-\\cos(\\omega)\\exp(\\frac{-\\omega}{2Q_{nlm}})), \\]with $\\omega=\\frac{2\\pi}{T_{nlm}}(t-t_0)$. In this equation, $Q_{nlm}$ is the attenuation factor, $n$ is the overtone factor, $l$ is degree, $m$ is order, and $t$ is time in seconds. $T_{nlm}$ and $Q_{nlm}$ are computed with the elastic Earth model or observed from the long period record of superconducting gravimeter measurements after the earthquakes.
Filter
Convert gravityfield to spherical harmonics and filter the coefficients.
Group
Groups a set of gravityfield and has no further effect itself.'},
'gridType': { 'name': 'gridType', 'key': 'gridType', 'description': 'This class generates a set of grid points. In a first step, the grid is always generated globally, with a regional subset of points can be extracted from the global grid. The parameters R and inverseFlattening define the shape of the ellipsoid on which the grid is generated. In case inverseFlattening is chosen as zero, a sphere is used. With height the distance of the points above the ellipsoid can be defined. In addition to the location of the points, weights are assigned to each of the points. These weights can be regarded as the surface element associated with each grid point. The geographical grid is an equal-angular point distribution with points located along meridians and along circles of latitude. deltaLambda denotes the angular difference between adjacent points along meridians and deltaPhi describes the angular difference between adjacent points along circles of latitude. The point setting results as follows: The number of grid points can be determined by The weights are calculated according to The zeroth level of densification coincides with the 12 icosahedron vertices, as displayed in the upper left part of Fig. . Then, depending on the envisaged densification, each triangle edge is divided into parts, illustrated in the upper right part of Fig. . The new nodes on the edges are then connected by arcs of great circles parallel to the triangle edges. The intersections of each three corresponding parallel lines become nodes of the densified grid as well. As in case of a spherical triangle those three connecting lines do not exactly intersect in one point, the center of the resulting triangle is used as location for the new node (lower left part of Fig. ). The lower right side of Fig. finally shows the densified triangle vertex grid for a level of . 
The number of grid points in dependence of the chosen level of densification can be calculated by The points of the zeroth level are located at the centers of the icosahedron triangles. To achieve a finer grid, each of the triangles is divided into four smaller triangles by connecting the midpoints of the triangle edges. The refined grid points are again located at the center of the triangles. Subsequently, the triangles can be further densified up to the desired level of densification , which is defined by level . The number of global grid points for a certain level can be determined by Thus the quantity of grid points depends exponentially on the level , as with every additional level the number of grid points quadruplicates. The grid features equiangular spacing along circles of latitude with parallelsCount defining the number of the parallels. Along the meridians the points are located at parallels at the zeros of the Legendre polynomial of degree , Consequently, the number of grid points sums up to The weights can be calculated according to The Reuter grid features equi-distant spacing along the meridians determined by the control parameter according to Thus denotes the number of points per meridian, as the two poles are included in the point distribution as well. Along the circles of latitude, the number of grid points decreases with increasing latitude in order to achieve an evenly distributed point pattern. This number is chosen, so that the points along each circle of latitude have the same spherical distance as two adjacent latitudes. The resulting relationship is given by The left hand side of this equation is the spherical distance between adjacent latitudes, the right hand side stands for the spherical distance between two points with the same polar distance and a longitudinal difference of . This longitudinal distance can be adjusted depending on to fulfill Eq. . 
The resulting formula for is The number of points for each circle of latitude can then be determined by Here the Gauss bracket specifies the largest integer equal to or less than . The longitudes are subsequently determined by The number of grid points can be estimated by The results from the fact that the are restricted to integer values. This kind of grid distributes an arbitrarily chosen number of points (defined by globalPointsCount ) following a recursive, quasi random sequence. In longitudinal direction the pattern follows This implies that every grid point features a unique longitude, with equi-angular longitudinal differences. The polar distance in the form for each point is determined by the following recursive sequence: Starting from an interval . If , then the midpoint of the interval is returned as result of the sequence, and the sequence is terminated. If the number of points is uneven, the midpoint is included into the list of . Subsequently, the interval is bisected into an upper and lower half, and the sequence is called for both halves. from upper and lower half are alternately sorted into the list of . The polar distances are calculated by The Driscoll-Healy grid, has equiangular spacing along the meridians as well as along the circles of latitude. In longitudinal direction (along the parallels), these angular differences for a given dimension coincide with those described for the corresponding geographical grid and Gauss grid. Along the meridians, the size of the latitudinal differences is half the size compared to the geographical grid. This results in the following point pattern, Consequently, the number of grid points is The weights are given by Creates one single point. Creates one single point. In this class grid is read from a file, which is given by . 
A corresponding file can be generated with GriddedDataCreate or with Matrix2GriddedData .', 'config_table': 'deltaLambda angle deltaPhi angle height double ellipsoidal height expression (variables \'height\', \'L\', \'B\') R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType level uint division of icosahedron, point count = 10*(n+1)**2+2 R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType level uint division of icosahedron, point count = 5*4**(n+1) R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType parallelsCount uint R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType gamma uint number of parallels height double ellipsoidal height R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType globalPointsCount uint height double ellipsoidal height R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType dimension uint number of parallels = 2*dimension height double ellipsoidal height R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere border borderType L angle longitude B angle latitude height double ellipsoidal height area double associated area element on unit sphere R double major axsis of the ellipsoid/sphere inverseFlattening double flattening of the ellipsoid, 0: sphere x double [m] y double [m] z double [m] area double associated area element on unit sphere inputfileGrid filename border borderType', 'display_text': 'This class generates a set of grid points. 
In a first step, the grid is always generated globally, with border a regional subset of points can be extracted from the global grid. The parameters R and inverseFlattening define the shape of the ellipsoid on which the grid is generated. In case inverseFlattening is chosen as zero, a sphere is used. With height the distance of the points above the ellipsoid can be defined. In addition to the location of the points, weights are assigned to each of the points. These weights can be regarded as the surface element associated with each grid point.
Geograph
The geographical grid is an equal-angular point distribution with points located along meridians and along circles of latitude. deltaLambda denotes the angular difference between adjacent points along meridians and deltaPhi describes the angular difference between adjacent points along circles of latitude. The point setting results as follows: \\[ \\lambda_i=\\frac{\\Delta\\lambda}{2}+i\\cdot\\Delta\\lambda\\qquad\\mbox{with}\\qquad 0\\leq i< \\frac{360^\\circ}{\\Delta\\lambda}, \\]\\[ \\varphi_j=-90^\\circ+\\frac{\\Delta\\varphi}{2}+j\\cdot\\Delta\\varphi\\qquad\\mbox{with}\\qquad 0\\leq j<\\frac{180^\\circ}{\\Delta\\varphi}. \\]The number of grid points can be determined by \\[ I=\\frac{360^\\circ}{\\Delta\\lambda}\\cdot\\frac{180^\\circ}{\\Delta\\varphi}. \\]The weights are calculated according to \\[ w_i=\\int\\limits_{\\lambda_i-\\frac{\\Delta\\lambda}{2}}^{\\lambda_i+\\frac{\\Delta\\lambda}{2}}\\int\\limits_{\\vartheta_i-\\frac{\\Delta\\vartheta}{2}}^{\\vartheta_i+\\frac{\\Delta\\vartheta}{2}}=2\\cdot\\Delta\\lambda\\sin(\\Delta\\vartheta)\\sin(\\vartheta_i). \\]
TriangleVertex
The zeroth level of densification coincides with the 12 icosahedron vertices, as displayed in the upper left part of Fig. fig:triangle_grid. Then, depending on the envisaged densification, each triangle edge is divided into $n$ parts, illustrated in the upper right part of Fig. fig:triangle_grid. The new nodes on the edges are then connected by arcs of great circles parallel to the triangle edges. The intersections of each three corresponding parallel lines become nodes of the densified grid as well. As in case of a spherical triangle those three connecting lines do not exactly intersect in one point, the center of the resulting triangle is used as location for the new node (lower left part of Fig. fig:triangle_grid). The lower right side of Fig. fig:triangle_grid finally shows the densified triangle vertex grid for a level of $n=3$. The number of grid points in dependence of the chosen level of densification can be calculated by \\[\\label{eq:numberVertex} I=10\\cdot(n+1)^2+2. \\]
TriangleCenter
The points of the zeroth level are located at the centers of the icosahedron triangles. To achieve a finer grid, each of the triangles is divided into four smaller triangles by connecting the midpoints of the triangle edges. The refined grid points are again located at the center of the triangles. Subsequently, the triangles can be further densified up to the desired level of densification $n$, which is defined by level.
The number of global grid points for a certain level can be determined by \\[\\label{eq:numberCenter} I=20\\cdot 4^n. \\]Thus the quantity of grid points depends exponentially on the level $n$, as with every additional level the number of grid points quadruplicates.
Gauss
The grid features equiangular spacing along circles of latitude with parallelsCount defining the number $L$ of the parallels. \\[ \\Delta\\lambda=\\frac{\\pi}{L}\\qquad\\Rightarrow\\qquad\\lambda_i=\\frac{\\Delta\\lambda}{2}+i\\cdot\\Delta\\lambda\\qquad\\mbox{with}\\qquad 0\\leq i< 2L. \\]Along the meridians the points are located at $L$ parallels at the $L$ zeros $\\vartheta_j$ of the Legendre polynomial of degree $L$, \\[ P_L(\\cos\\vartheta_j)=0. \\]Consequently, the number of grid points sums up to \\[ I=2\\cdot L^2. \\]The weights can be calculated according to \\[ w_i(L)=\\Delta\\lambda\\frac{2}{(1-t_i^2)(P\'_{L}(\\cos(\\vartheta _i)))^2},\\label{weights} \\]
Reuter
The Reuter grid features equi-distant spacing along the meridians determined by the control parameter $\\gamma$ according to \\[ \\Delta\\vartheta=\\frac{\\pi}{\\gamma}\\qquad\\Rightarrow\\vartheta_j=j\\Delta\\vartheta,\\qquad\\mbox{with}\\qquad 1\\leq j\\leq \\gamma-1. \\]Thus $\\gamma+1$ denotes the number of points per meridian, as the two poles are included in the point distribution as well. Along the circles of latitude, the number of grid points decreases with increasing latitude in order to achieve an evenly distributed point pattern. This number is chosen, so that the points along each circle of latitude have the same spherical distance as two adjacent latitudes. The resulting relationship is given by \\[\\label{eq:sphericalDistance} \\Delta\\vartheta=\\arccos\\left( \\cos^2\\vartheta_j+\\sin^2\\vartheta_j\\cos\\Delta\\lambda_j\\right). \\]The left hand side of this equation is the spherical distance between adjacent latitudes, the right hand side stands for the spherical distance between two points with the same polar distance $\\vartheta_j$ and a longitudinal difference of $\\Delta\\lambda_i$. This longitudinal distance can be adjusted depending on $\\vartheta_j$ to fulfill Eq. \\eqref{eq:sphericalDistance}. The resulting formula for $\\Delta\\lambda_i$ is \\[\\label{eq:deltaLambdai} \\Delta\\lambda_j=\\arccos\\left( \\frac{\\sin\\Delta\\vartheta -\\cos^2\\vartheta_j}{\\sin^2\\vartheta_j}\\right). \\]The number of points $\\gamma_j$ for each circle of latitude can then be determined by \\[\\label{eq:gammai} \\gamma_j=\\left[ \\frac{2\\pi}{\\Delta\\lambda_j}\\right] . \\]Here the Gauss bracket $[x]$ specifies the largest integer equal to or less than $x$. The longitudes are subsequently determined by \\[ \\lambda_{ij}=\\frac{\\Delta\\lambda_j}{2}+i\\cdot(2\\pi/\\gamma_j),\\qquad\\mbox{with}\\qquad 0\\leq i< \\gamma_j. 
\\]The number of grid points can be estimated by \\[\\label{eq:numberReuter} I\\leq 2+\\frac{4}{\\pi}\\gamma^2, \\]The $\\leq$ results from the fact that the $\\gamma_j$ are restricted to integer values.
Corput
This kind of grid distributes an arbitrarily chosen number of $I$ points (defined by globalPointsCount) following a recursive, quasi random sequence. In longitudinal direction the pattern follows \\[ \\Delta\\lambda=\\frac{2\\pi}{I}\\qquad\\Rightarrow\\qquad\\frac{\\Delta\\lambda}{2}+\\lambda_i=i\\cdot\\Delta\\lambda\\qquad\\mbox{with}\\qquad 1\\leq i\\leq I. \\]This implies that every grid point features a unique longitude, with equi-angular longitudinal differences.
The polar distance in the form $t_i=\\cos\\vartheta_i$ for each point is determined by the following recursive sequence:
Starting from an interval $t\\in[-1,1]$.
If $I=1$, then the midpoint of the interval is returned as result of the sequence, and the sequence is terminated.
If the number of points is uneven, the midpoint is included into the list of $t_i$.
Subsequently, the interval is bisected into an upper and lower half, and the sequence is called for both halves.
$t$ from upper and lower half are alternately sorted into the list of $t_i$.
The polar distances are calculated by \\[ \\vartheta_i=\\arccos\\, t_i. \\]
Driscoll
The Driscoll-Healy grid, has equiangular spacing along the meridians as well as along the circles of latitude. In longitudinal direction (along the parallels), these angular differences for a given dimension $L$ coincide with those described for the corresponding geographical grid and Gauss grid. Along the meridians, the size of the latitudinal differences is half the size compared to the geographical grid. This results in the following point pattern, \\[ \\begin{split} \\Delta\\lambda=\\frac{\\pi}{L}\\qquad&\\Rightarrow\\qquad\\lambda_i=\\frac{\\Delta\\lambda}{2}+i\\cdot\\Delta\\lambda\\qquad&\\mbox{with}\\qquad 0\\leq i< 2L, \\\\ \\Delta\\vartheta=\\frac{\\pi}{2L}\\qquad&\\Rightarrow\\qquad\\vartheta_j=j\\cdot\\Delta\\vartheta\\qquad&\\mbox{with}\\qquad 1\\leq j\\leq 2L. \\end{split} \\]Consequently, the number of grid points is \\[ I=4\\cdot L^2. \\]The weights are given by \\[ w_i=\\Delta\\lambda\\frac{4}{2L}\\sin(\\vartheta_i)\\sum_{l=0}^{L-1}\\frac{\\sin\\left[ (2l+1)\\;\\vartheta_i\\right] }{2l+1}. \\]
SinglePoint
Creates one single point.
SinglePointCartesian
Creates one single point.
File
In this class grid is read from a file, which is given by inputfileGrid. A corresponding file can be generated with GriddedDataCreate or with Matrix2GriddedData.'},
'instrumentTypeType': { 'name': 'instrumentTypeType', 'key': 'instrumentTypeType', 'description': 'Defines the type of an instrument file .', 'config_table': 'instrumentTypeType choice instrument type INSTRUMENTTIME time without data MISCVALUE single value MISCVALUES multiple values VECTOR3D x, y, z COVARIANCE3D xx, yy, zz, xy, xz, yz ORBIT position [m], velocity [m/s], acceleration [m/s^2] (each x, y, z) STARCAMERA quaternions (q0, qx, qy, qz) ACCELEROMETER x, y, z [m/s^2] SATELLITETRACKING range [m], range rate [m/s], range acceleration [m/s^2] GRADIOMETER xx, yy, zz, xy, xz, yz [1/s^2] GNSSRECEIVER GNSS phase/code observations [m] OBSERVATIONSIGMA accuracy SATELLITELASERRANGING range [m], accuracy [m], redundancy, window [s], wavelength [m], azimuth [rad], elevation [rad] METEOROLOGICAL temperature [K], pressure [Pa], humidity [%], windSpeed [m/s], radiation [W/m^2], precip. [mm/d] MASS THRUSTER MAGNETOMETER ACCHOUSEKEEPING', 'display_text': 'Defines the type of an instrument file.'},
'interpolatorTimeSeriesType': { 'name': 'interpolatorTimeSeriesType', 'key': 'interpolatorTimeSeriesType', 'description': 'This class resamples data of a times series to new poins in time. Polynomial prediction using a moving polynomial of polynomialDegree . The optimal polynomial is chosen based on the centricity of the data points around the resampling point and the distance to all polynomial data points. All polynomial data points must be within maxDataPointRange . Resampling points within maxExtrapolationDistance of the polynomial will be extrapolated. The elements maxDataPointRange and maxExtrapolationDistance are given in the unit of seconds. If negative values are used, the unit is relative to the median input sampling. A polynomial of polynomialDegree is estimated using all data points within maxDataPointDistance of the resampling point. This polynomial is then used to predict the resampling point. A resampling point will be extrapolated if there are only data points before/after as long as the closest one is within maxExtrapolationDistance . The elements maxDataPointDistance and maxExtrapolationDistance are given in the unit of seconds. 
If negative values are used, the unit is relative to the median input sampling.', 'config_table': 'polynomialDegree uint degree of the moving polynomial maxDataPointRange double [seconds] all degree+1 data points must be within this range for a valid polynomial maxExtrapolationDistance double [seconds] resampling points within this range of the polynomial will be extrapolated polynomialDegree uint degree of the estimated polynomial maxDataPointDistance double [seconds] all data points within this distance around the resampling point will be used maxExtrapolationDistance double [seconds] resampling points within this range of the polynomial will be extrapolated polynomialDegree uint degree of the estimated polynomial maxDataGap double [seconds] max data gap to interpolate maxDataSpan double [seconds] time span on each side used for least squares fit margin double [seconds] margin for identical times', 'display_text': 'This class resamples data of a time series to new points in time.
Polynomial
Polynomial prediction using a moving polynomial of polynomialDegree. The optimal polynomial is chosen based on the centricity of the data points around the resampling point and the distance to all polynomial data points. All polynomial data points must be within maxDataPointRange. Resampling points within maxExtrapolationDistance of the polynomial will be extrapolated. The elements maxDataPointRange and maxExtrapolationDistance are given in the unit of seconds. If negative values are used, the unit is relative to the median input sampling.
Least squares polynomial fit
A polynomial of polynomialDegree is estimated using all data points within maxDataPointDistance of the resampling point. This polynomial is then used to predict the resampling point. A resampling point will be extrapolated if there are only data points before/after as long as the closest one is within maxExtrapolationDistance. The elements maxDataPointDistance and maxExtrapolationDistance are given in the unit of seconds. If negative values are used, the unit is relative to the median input sampling.
Fill gaps with least squares polynomial fit
'},
'kernelType': { 'name': 'kernelType', 'key': 'kernelType', 'description': 'Kernel defines harmonic isotropic integral kernels . where is the (disturbance)potential and is a functional on the spherical surface . The Kernel can be exapanded into a series of (fully normalized) legendre polynomials On the one hand the kernel defines the type of the functionals that are measured or have to be computed, e.g. gravity anomalies given by the Stokes-kernel. On the other hand the kernel functions can be used as basis functions to represent the gravity field, e.g. as spline functions or wavelets. The geoid height is defined by Bruns formula with the disturbance potential and the normal gravity and where is the ellipsoidal height in meter and the longitude. The kernel is given by and the coefficients in are Gravity anomalies in linearized form are defined by The Stokes kernel is given by and the coefficients in are Gravity disturbances in linearized form are defined by The Hotine kernel is given by and the coefficients in are The Abel-Poisson kernel is given by and the coefficients in are This kernel defines a point mass or mass on a single layer ( -kernel) taking the effect of the loading into account. The coefficients of the kernel defined in are where is the gravitational constant and are the load Love numbers. Height of equivalent water columns taking the effect of the loading into account. The coefficients of the kernel defined in are where is the gravitational constant, is the density of water and are the load Love numbers. Ocean bottom pressure caused by water and atmosphere masses columns taking the effect of the loading into account. The coefficients of the kernel defined in are where is the gravitational constant, is the normal gravity and are the load Love numbers. Computes the radial deformation caused by loading. 
The coefficients of the kernel defined in are where is the normal gravity defined in , and are the load Love numbers and the load deformation Love numbers. This kernel defines the second radial derivative of the (disturbance) potential. The coefficients of the kernel defined in are The kernel is defined by the coefficients given by file. Another is smoothed by a gauss filter which is defined by with where is the given smoothing radius in km and km is the Earth radius. The coefficients of the kernel are multiplicated by Another is smoothed by a Blackman low-pass filter. The filter is defined through the beginning and end of the transition from pass-band to stop-band. This transition band is specified by startDegreeTransition ( ) and stopDegreeTransition ( ). The coefficients of this kernel are defined as with Another is truncated before minDegree and after maxDegree . The coefficients of this kernel are defined as The selenoid height is defined by Bruns formula with the disturbance potential and the normal gravity of the moon. The kernel is given by and the coefficients in are', 'config_table': 'inputfileLoadingLoveNumber filename density double [kg/m**3] inputfileLoadingLoveNumber filename inputfileLoadingLoveNumber filename inputfileDeformationLoadLoveNumber filename inputfilePotentialLoadLoveNumber filename if full potential is given and not only loading potential inputfileCoefficients filename kernel kernelType radius double filter radius [km] kernel kernelType startDegreeTransition uint minimum degree in transition band stopDegreeTransition uint maximum degree in transition band kernel kernelType minDegree uint truncate before minDegree maxDegree uint truncate after maxDegree', 'display_text': 'Kernel defines harmonic isotropic integral kernels $K$. \\[ T(P) = \\frac{1}{4\\pi}\\int_\\Omega K(P,Q)\\cdot f(Q)\\,d\\Omega(Q), \\]where $T$ is the (disturbance)potential and $f$ is a functional on the spherical surface $\\Omega$. 
The Kernel can be expanded into a series of (fully normalized) Legendre polynomials \\[\\label{eq.kernel} K(\\cos\\psi,r,R) = \\sum_n \\left(\\frac{R}{r}\\right)^{n+1} k_n\\sqrt{2n+1}\\bar{P}_n(\\cos\\psi). \\]On the one hand the kernel defines the type of the functionals $f$ that are measured or have to be computed, e.g. gravity anomalies given by the Stokes-kernel. On the other hand the kernel functions can be used as basis functions to represent the gravity field, e.g. as spline functions or wavelets.
GeoidHeight
The geoid height is defined by Bruns formula \\[ N = \\frac{1}{\\gamma}T \\]with $T$ the disturbance potential and the normal gravity \\[\\label{normalgravity} \\gamma = \\gamma_0 - 0.30877\\cdot 10^{-5}/s^2(1-0.00142\\sin^2(B))h \\]and \\[ \\gamma_0 = 9.780327\\,m/s^2(1+0.0053024\\sin^2(B)-0.0000058\\sin^2(2B)) \\]where $h$ is the ellipsoidal height in meter and $B$ the latitude.
The kernel is given by \\[ K(\\cos\\psi,r,R) = \\gamma\\frac{R(r^2-R^2)}{l^3}, \\]and the coefficients in \\eqref{eq.kernel} are \\[ k_n = \\gamma. \\]
Anomalies
Gravity anomalies in linearized form are defined by \\[ \\Delta g = -\\frac{\\partial T}{\\partial r}-\\frac{2}{r}T. \\]The Stokes kernel is given by \\[ K(\\cos\\psi,r,R) = \\frac{2R^2}{l}-3\\frac{Rl}{r^2}-\\frac{R^2}{r^2}\\cos\\psi \\left(5+3\\ln\\frac{l+r-R\\cos\\psi}{2r}\\right), \\]and the coefficients in \\eqref{eq.kernel} are \\[ k_n = \\frac{R}{n-1}. \\]
Disturbance
Gravity disturbances in linearized form are defined by \\[ \\delta g = -\\frac{dT}{dr}. \\]The Hotine kernel is given by \\[ K(\\cos\\psi,r,R) = \\frac{2R^2}{l}-R\\ln\\frac{l+R-r\\cos\\psi}{r(1-\\cos\\psi)}, \\]and the coefficients in \\eqref{eq.kernel} are \\[ k_n = \\frac{R}{n+1}. \\]
Potential
The Abel-Poisson kernel is given by \\[ K(\\cos\\psi,r,R) = \\frac{R(r^2-R^2)}{l^3}, \\]and the coefficients in \\eqref{eq.kernel} are \\[ k_n = 1. \\]
Density
This kernel defines a point mass or mass on a single layer ($1/l$-kernel) taking the effect of the loading into account.
The coefficients of the kernel defined in \\eqref{eq.kernel} are \\[ k_n = 4\\pi G R\\frac{1+k_n\'}{2n+1}, \\]where $G$ is the gravitational constant and $k_n\'$ are the load Love numbers.
WaterHeight
Height of equivalent water columns taking the effect of the loading into account.
The coefficients of the kernel defined in \\eqref{eq.kernel} are \\[ k_n = 4\\pi G \\rho R\\frac{1+k_n\'}{2n+1}, \\]where $G$ is the gravitational constant, $\\rho$ is the density of water and $k_n\'$ are the load Love numbers.
BottomPressure
Ocean bottom pressure caused by water and atmosphere masses columns taking the effect of the loading into account.
The coefficients of the kernel defined in \\eqref{eq.kernel} are \\[ k_n = \\frac{4\\pi G R }{\\gamma}\\frac{1+k_n\'}{2n+1}, \\]where $G$ is the gravitational constant, $\\gamma$ is the normal gravity and $k_n\'$ are the load Love numbers.
Deformation
Computes the radial deformation caused by loading.
The coefficients of the kernel defined in \\eqref{eq.kernel} are \\[ k_n = \\gamma\\frac{1+k_n\'}{h_n\'}, \\]where $\\gamma$ is the normal gravity defined in \\eqref{normalgravity}, $h_n\'$ and $k_n\'$ are the load Love numbers and the load deformation Love numbers.
RadialGradient
This kernel defines the second radial derivative of the (disturbance) potential. \\[ T_{rr} = \\frac{\\partial^2 T}{\\partial r^2}. \\]The coefficients of the kernel defined in \\eqref{eq.kernel} are \\[ k_n = \\frac{r^2}{(n+1)(n+2)}. \\]
Coefficients
The kernel is defined by the coefficients $k_n$ given by file.
FilterGauss
Another kernel is smoothed by a Gauss filter which is defined by \\[ F(\\cos\\psi) = \\frac{b\\cdot e^{-b(1-\\cos\\psi)}}{1-e^{-2b}} \\]with $b = \\frac{\\ln(2)}{1-\\cos(r/R)}$ where $r$ is the given smoothing radius in km and $R=6378.1366$ km is the Earth radius. The coefficients $k_n$ of the kernel are multiplied by \\[ f_n = \\frac{1}{2n+1} \\int_{-1}^1 F(t)\\cdot \\bar{P}_n(t)\\,dt. \\]
BlackmanLowpass
Another kernel is smoothed by a Blackman low-pass filter. The filter is defined through the beginning and end of the transition from pass-band to stop-band. This transition band is specified by startDegreeTransition ($n_1$) and stopDegreeTransition ($n_2$).
The coefficients of this kernel are defined as \\[ \\begin{cases} 1 & \\text{for } n < n_1 \\\\ A_n^2 & \\text{for } n_1\\leq n \\leq n_2 \\\\ 0 & \\text{for } n > n_2 \\\\ \\end{cases} \\]with \\[ A_n = 0.42 + 0.5\\cos(\\pi \\frac{n-n_1}{n_2-n_1}) + 0.08 \\cos(2\\pi\\frac{n-n_1}{n_2-n_1}). \\]
Truncation
Another kernel is truncated before minDegree and after maxDegree. The coefficients of this kernel are defined as \\[ k_n = \\begin{cases} 1 & \\text{for } n_{\\text{minDegree}} \\leq n \\leq n_{\\text{maxDegree}}\\\\ 0 & \\text{else.} \\\\ \\end{cases} \\]
SelenoidHeight
The selenoid height is defined by Bruns formula \\[ N = \\frac{1}{\\gamma}T \\]with $T$ the disturbance potential and the normal gravity $\\gamma=\\frac{GM}{R^2}$ of the moon.
The kernel is given by \\[ K(\\cos\\psi,r,R) = \\gamma\\frac{R(r^2-R^2)}{l^3}, \\]and the coefficients in \\eqref{eq.kernel} are \\[ k_n = \\gamma. \\]'},
'loopType': { 'name': 'loopType', 'key': 'loopType', 'description': 'Generates a sequence with variables to loop over. The variable names can be set with variableLoop... and the current values are assigned to the variables for each loop step. With only a subset of loop steps are performed. The variableLoopIndex and variableLoopCount are not affected by the condition. The result would therefore be the same as using LoopPrograms with a nested IfPrograms . See Loop and conditions for usage. Loop over points in time. Loop over the intervals between points in time. Loop over list of strings. Loop over row s of a table containing strings. Each row must have the same number of cells. For each column an extra variableLoopString can be defined. Loop over list of strings from files . Loop over rows of a table containing strings. Each row must have the same number of columns. Loop over lines of a text file. Loop over rows of a matrix . To define the loop variables the standard data variables of the matrix are available, see dataVariables . Loop over sequence of numbers. Loop over files of a directory. Loop over lines of command output. Loop over specific equipment of a platform file . Loop over nested loops. First loop is outermost loop, every subsequent loop is one level below the previous loop . Perform the in the alphabetically order defined by the evaluated sortString for each loop step. So the string must contain loop variables. If sortString is empty, no sorting will take place. Example: The sortString = {loopTime:%m} of a time series sorts the times in ascending order by month. The same principle is used to remove duplicates. If different loop steps evaluates removeDuplicatesString to the same string, only the first loop step is executed. DEPRECATED since 2025-09-27. Use LoopStringTable instead. DEPRECATED since 2022-11-11. 
Use LoopPlatformEquipment instead.', 'config_table': 'timeSeries timeSeriesType loop is called for every point in time variableLoopTime string variable with time of each loop variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step timeIntervals timeSeriesType loop is called for every interval variableLoopTimeStart string variable with starting time of each interval variableLoopTimeEnd string variable with ending time of each interval variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step string string explicit list of strings variableLoopString string name of the variable to be replaced variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step row sequence rows of a table cell string list of columns in a row transpose boolean loop over columns instead of rows variableLoopString string 1. variable name for the 1. column, next variable name for the 2. column, ... variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step inputfile filename string list file variableLoopString string name of the variable to be replaced variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step inputfile filename string table file with multiple columns transpose boolean loop over columns instead of rows variableLoopString string 1. variable name for the 1. 
column, next variable name for the 2. column, ... variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step inputfile filename simple text file with lines startIndex uint start at element startIndex (counting from 0) count uint use number of loops only (default: use all) variableLoopLine string name of the variable to be replaced variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step inputfile filename transpose boolean effectively loop over columns startRow expression start at this row (variable: rows) countRows expression use this many rows (variable: rows) variableLoop expression define a variable by name = expression (input columns are named data0, data1, ...) variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step rangeStart double start of range rangeEnd double end of range (inclusive) sampling double sampling variableLoopNumber string name of the variable to be replaced variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step directory filename directory pattern string wildcard pattern isRegularExpression boolean pattern is a regular expression variableLoopFile string name of the variable to be replaced variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step command filename each output line becomes a loop iteration silently boolean 
without showing the output. variableLoopString string name of the variable to be replaced variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step inputfilePlatform filename platform info file equipmentType choice equipment type to loop over all loop over all types gnssAntenna loop over antennas gnssReceiver loop over receivers slrStation loop over SLR stations slrRetroReflector loop over laser retroreflectors satelliteIdentifier loop over satellite identifiers other loop over other types variableLoopName string variable with name variableLoopSerial string variable with serial variableLoopInfo string variable with radome (antenna) or version (receiver) variableLoopTimeStart string variable with start time variableLoopTimeEnd string variable with end time variableLoopPositionX string variable with position x variableLoopPositionY string variable with position y variableLoopPositionZ string variable with position z variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step loop loopType subloop variableLoopIndex string variable with index of current iteration (starts with zero) condition conditionType check before each loop step loop loopType sortString string use {loopVariables}, sort alphabetically descending boolean sorting descending instead of ascending removeDuplicatesString string use {loopVariables}, remove duplicates (order is preserved) variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step table choice define table by rows/columns rowWise sequence define table by rows row sequence define table by rows cell string explicit list
of cells in row/column columnWise sequence define table by columns column sequence define table by columns cell string explicit list of cells in row/column variableLoopString string 1. variable name for the 1. column, next variable name for the 2. column, ... variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step inputfileGnssStationInfo filename station/transmitter info file infoType choice info to loop over antenna loop over antennas receiver loop over receivers variableLoopName string variable with antenna/receiver name variableLoopSerial string variable with antenna/receiver serial variableLoopInfo string variable with radome (antenna) or version (receiver) variableLoopTimeStart string variable with antenna/receiver start time variableLoopTimeEnd string variable with antenna/receiver end time variableLoopIndex string variable with index of current iteration (starts with zero) variableLoopCount string variable with total number of iterations condition conditionType check before each loop step', 'display_text': 'Generates a sequence with variables to loop over. The variable names can be set with variableLoop... and the current values are assigned to the variables for each loop step.
With condition only a subset of loop steps are performed. The variableLoopIndex and variableLoopCount are not affected by the condition. The result would therefore be the same as using LoopPrograms with a nested IfPrograms.
Loop over nested loops. First loop is outermost loop, every subsequent loop is one level below the previous loop.
SortAndRemoveDuplicates
Perform the loop in the alphabetical order defined by the evaluated sortString for each loop step. So the string must contain loop variables. If sortString is empty, no sorting will take place.
Example: The sortString={loopTime:%m} of a time series sorts the times in ascending order by month.
The same principle is used to remove duplicates. If different loop steps evaluate removeDuplicatesString to the same string, only the first loop step is executed.
ManualTable
DEPRECATED since 2025-09-27. Use LoopStringTable instead.
FileGnssStationInfo
DEPRECATED since 2022-11-11. Use LoopPlatformEquipment instead.'},
'magnetosphereType': { 'name': 'magnetosphereType', 'key': 'magnetosphereType', 'description': 'This class provides functions of the magnetic field of the Earth. International Geomagnetic Reference Field.', 'config_table': 'inputfileMagneticNorthPole filename time series of north pole', 'display_text': 'This class provides functions of the magnetic field of the Earth.
IGRF
International Geomagnetic Reference Field.'},
'matrixGeneratorType': { 'name': 'matrixGeneratorType', 'key': 'matrixGeneratorType', 'description': 'This class provides a matrix used e.g. by MatrixCalculate . If multiple matrices are given the resulting matrix is the sum all and the size is exandeded to fit all matrices. Before the computation of each submatrix the variables rowsBefore and columnsBefore with current size of the overall matrix are set. As all matrices can be manipulated before, complex matrix operations are possible. Matrix from file . Matrix from a normal equation file . The symmetric normal matrix, the right hand side vector, the lPl vector, or the observation count can be selected. Matrix filled by an expression. For each element of the new matrix the variables row and column are set and the expression element is evaluated. Excample: The element = if(row==column,1,0) generates an identity matrix. The elements of a matrix are replaced an expression. For each element of the matrix the variables data , row , column are set and the expression element is evaluated and replaces the element. Additionally the standard data variables are available (assigned each row), see dataVariables . Given two matrices and this class computes , where is an expression (for example data0*data1 ). For each element of the matrix the variables data0 , data1 , row , column are set and the expression element is evaluated. Append matrix to the right (first row) or bottom (first column). Shift start row and start column of a matrix. In other words: zero lines and columns are inserted at the beginning of the matrix. Slice of a matrix. Matrix reshaped columnwise to new row and columns. Reorder rows or columns of a matrix by an index vectors. The index vector can be created with ParameterSelection2IndexVector . Sort matrix by column in ascending order by default or in descending order. Transposed of a matrix . Multiplication of matrices. Inverse of a matrix . 
Upper triangular natrix of the cholesky decomposition of a symmetric matrix . Symmetric matrix from rank k update: . Computes the eigenvalues of a square matrix and gives a vector of eigenvalues for symmetric matrices or a matrix with 2 columns with real and imaginary parts in general case. Extract the diagonal or subdiagnoal ( vector) of a matrix. The zero diagonal means the main diagonal, a positive value the superdiagonal, and a negative the subdiagonal. Generate a matrix from a diagonal vector. Set type (matrix, matrixSymmetricUpper, matrixSymmetricLower, matrixTriangularUpper, matrixTriangularLower) of a matrix. If the type is not matrix, the matrix must be quadratic. Symmetric matrices are filled symmetric and for triangular matrix the other triangle is set to zero.', 'config_table': 'inputfileMatrix filename factor double inputfileNormalEquation filename type choice normalMatrix rightHandSide lPl observationCount factor double rows expression (variables: rowsBefore, columnsBefore) columns expression (variables: rowsBefore, columnsBefore) element expression for each element of matrix (variables: row, column, rows, columns, rowsBefore, columnsBefore) matrix matrixGeneratorType element expression for each element of matrix (variables: data, row, column, rows, columns, rowsBefore, columnsBefore) matrix1 matrixGeneratorType matrix2 matrixGeneratorType expression expression for each element of matrix (variables: data0, data1, row, column, rows, columns, rowsBefore, columnsBefore) matrix matrixGeneratorType side choice right bottom diagonal matrix matrixGeneratorType startRow expression start row (variables: rowsBefore, columnsBefore, rows, columns) startColumn expression start column (variables: rowsBefore, columnsBefore, rows, columns) matrix matrixGeneratorType startRow expression start row of matrix (variables: rowsBefore, columnsBefore, rows, columns) startColumn expression start column of matrix (variables: rowsBefore, columnsBefore, rows, columns) rows 
expression 0: until end (variables: rowsBefore, columnsBefore, rows, columns) columns expression 0: until end (variables: rowsBefore, columnsBefore, rows, columns) matrix matrixGeneratorType rows expression 0: auto-determine rows, (variables: rowsBefore, columnsBefore) columns expression 0: auto-determine columns (variables: rowsBefore, columnsBefore) matrix matrixGeneratorType inputfileIndexVectorRow filename index in input matrix or -1 for new parameter. inputfileIndexVectorColumn filename index in input matrix or -1 for new parameter. matrix matrixGeneratorType column uint sort by column, top = highest priority descending boolean matrix matrixGeneratorType matrix1 matrixGeneratorType matrix2 matrixGeneratorType factor double matrix matrixGeneratorType pseudoInverse boolean compute pseudo inverse instead of regular one matrix matrixGeneratorType matrix matrixGeneratorType factor double matrix matrixGeneratorType eigenVectors boolean return eigen vectors instead of eigen values matrix matrixGeneratorType diagonal int zero: main diagonal, positive: superdiagonal, negative: subdiagonal matrix matrixGeneratorType (nx1) or (1xn) diagonal vector diagonal int zero: main diagonal, positive: superdiagonal, negative: subdiagonal matrix matrixGeneratorType type choice matrix matrixSymmetricUpper matrixSymmetricLower matrixTriangularUpper matrixTriangularLower', 'display_text': 'This class provides a matrix used e.g. by MatrixCalculate. If multiple matrices are given the resulting matrix is the sum of all and the size is expanded to fit all matrices. Before the computation of each submatrix the variables rowsBefore and columnsBefore with current size of the overall matrix are set. As all matrices can be manipulated before, complex matrix operations are possible.
Matrix from a normal equation file. The symmetric normal matrix, the right hand side vector, the lPl vector, or the observation count $(1\\times1)$ can be selected.
Expression
Matrix filled by an expression. For each element of the new matrix the variables row and column are set and the expression element is evaluated.
Example: The element=if(row==column,1,0) generates an identity matrix.
Element manipulation
The elements of a matrix are replaced by an expression. For each element of the matrix the variables data, row, column are set and the expression element is evaluated and replaces the element. Additionally the standard data variables are available (assigned each row), see dataVariables.
ElementWiseOperation
Given two matrices $\\mathbf{A}$ and $\\mathbf{B}$ this class computes $c_{ij} = f(a_{ij}, b_{ij})$, where $f$ is an expression (for example data0*data1). For each element of the matrix the variables data0, data1, row, column are set and the expression element is evaluated.
Append
Append matrix to the right (first row) or bottom (first column).
Shift
Shift start row and start column of a matrix. In other words: zero lines and columns are inserted at the beginning of the matrix.
Slice
Slice of a matrix.
Reshape
Matrix reshaped columnwise to new row and columns.
Reorder
Reorder rows or columns of a matrix by an index vector. The index vector can be created with ParameterSelection2IndexVector.
Sort
Sort matrix by column in ascending order by default or in descending order.
Transpose
Transposed of a matrix $\\M A^T$.
Multiplication
Multiplication of matrices.
Inverse
Inverse of a matrix $\\M A^{-1}$.
Cholesky
Upper triangular matrix of the Cholesky decomposition of a symmetric matrix $\\M A=\\M W^T\\M W$.
RankKUpdate
Symmetric matrix from rank k update: $\\M A^T\\M A$.
EigenValues
Computes the eigenvalues of a square matrix and gives a vector of eigenvalues for symmetric matrices or a matrix with 2 columns with real and imaginary parts in general case.
Diagonal
Extract the diagonal or subdiagonal ($n\\times 1$ vector) of a matrix. The zero diagonal means the main diagonal, a positive value the superdiagonal, and a negative the subdiagonal.
FromDiagonal
Generate a matrix from a diagonal vector.
Set type
Set type (matrix, matrixSymmetricUpper, matrixSymmetricLower, matrixTriangularUpper, matrixTriangularLower) of a matrix. If the type is not matrix, the matrix must be quadratic. Symmetric matrices are filled symmetric and for triangular matrix the other triangle is set to zero.'},
'miscAccelerationsType': { 'name': 'miscAccelerationsType', 'key': 'miscAccelerationsType', 'description': 'This class gives the non conservative forces acting on satellites. The relativistic effect to the acceleration of an artificial Earth satellite according to IERS2010 conventions. The macro model and the attitude of the satellite is not needed. This class computes acceleration acting on a satellite caused by Solar and Earth radiation pressure and thermal radiation. Solar radiation pressure: The solar constant at 1 AU can be set via solarFlux . The factorSolarRadation can be used to scale the computed acceleration of the direct solar radiation. Earth radiation pressure: Input are a time series of gridded albedo values (unitless) as and a time series of gridded longwave flux (W/m ) as . Both files are optional and if not specified, the respective effect on the acceleration is not computed. The factorEarthRadation can be used to scale the computed acceleration of the earth radiation. The thermal radiation (TRP) of the satellite itself is either computed as direct re-emission or based on the actual temperature of the satellite surfaces, depending on the setings of the satellite macro model . The second one uses a transient temperature model with a temporal differential equation which disallows parallel computing. The factorThermalRadiation can be used to scale the computed acceleration of the TRP. The algorithms are described in: Woeske et. al. (2019), GRACE accelerometer calibration by high precision non-gravitational force modeling, Advances in Space Research, https://doi.org/10.1016/j.asr.2018.10.025 . Atmospheric drag model. Algorithm for the atmospheric drag modelling is based on the free molecule flow theory by Sentman 1961. An analytical expression of this treatise is given in Moe and Moe 2005. Sentman L. (1961), Free molecule flow theory and its application to the determination of aerodynamic forces, Technical report. Moe K., Moe M. M. 
(2005), Gas-surface interactions and satellite drag coefficients, Planetary and Space Science 53(8), 793-801, doi:10.1016/j.pss.2005.03.005. Optional determination steps: Turn temperature on or off. In the first case, the model mentioned above is applied, which estimates variable drag and lift coefficients - in the latter case a constant drag coefficient can be specified. Turn wind on/off: It enables the usage of the Horizontal Wind Model 2014 to add additional thermospheric winds in the calculation process. Atmospheric drag computed from thermospheric density along the orbit ( , MISCVALUE). The is used to to compute temperature and wind. For further details see . The thrust (acceleration) in the opposite direction the antenna is facing which is generated by satellite antenna broadcasts. The thrust is defined in the satellite macro model. Reads a solution vector from file which may be computed by a least squares adjustment (e.g. by NormalsSolverVCE ). The coefficients of the vector are interpreted from position indexStart (counting from zero) with help of . If the solution file contains solution of several right hand sides you can choose one with number rightSide (counting from zero). The computed result is multiplied with factor . Groups a set of and has no further effect itself. DEPRECATED since 2022-12-19. Use radiationPressure instead. DEPRECATED since 2022-12-19. 
Use radiationPressure instead.', 'config_table': 'beta double PPN (parameterized post-Newtonian) parameter gamma double PPN (parameterized post-Newtonian) parameter J double Earth’s angular momentum per unit mass [m**2/s] GM double Geocentric gravitational constant factor double the result is multiplied by this factor solarflux double solar flux constant in 1 AU [W/m^2] eclipse eclipseType inputfileAlbedoTimeSeries filename GriddedDataTimeSeries of albedo values (unitless) inputfileLongwaveFluxTimeSeries filename GriddedDataTimeSeries of longwave flux values [W/m^2] factorSolarRadation double Solar radiation pressure is multiplied by this factor factorEarthRadation double Earth radiation preussure is multiplied by this factor factorThermalRadiation double Thermal (re-)radiation is multiplied by this factor thermosphere thermosphereType earthRotation double [rad/s] considerTemperature boolean compute drag and lift, otherwise simple drag coefficient is used considerWind boolean factor double the result is multiplied by this factor inputfileDensity filename density along orbit, MISCVALUE (kg/m^3) thermosphere thermosphereType used to compute temperature and wind earthRotation double [rad/s] considerTemperature boolean compute drag and lift, otherwise simple drag coefficient is used considerWind boolean factor double the result is multiplied by this factor factor double the result is multiplied by this factor parametrization parametrizationAccelerationType inputfileSolution filename solution vector indexStart uint position in the solution vector rightSide uint if solution contains several right hand sides, select one factor double the result is multiplied by this factor, set -1 to subtract the field miscAccelerations miscAccelerationsType factor double the result is multiplied by this factor solarflux double solar flux constant in 1 AU [W/m**2] eclipse eclipseType factor double the result is multiplied by this factor, set -1 to subtract the field inputfileReflectivity 
filename inputfileEmissivity filename solarflux double solar flux constant in 1 AU [W/m**2] factor double the result is multiplied by this factor, set -1 to subtract the field', 'display_text': 'This class gives the non conservative forces acting on satellites.
Relativistic effect
The relativistic effect to the acceleration of an artificial Earth satellite according to IERS2010 conventions.
The macro model and the attitude of the satellite is not needed.
RadiationPressure
This class computes acceleration acting on a satellite caused by Solar and Earth radiation pressure and thermal radiation.
Solar radiation pressure: The solar constant at 1 AU can be set via solarFlux. The factorSolarRadation can be used to scale the computed acceleration of the direct solar radiation.
Earth radiation pressure: Input are a time series of gridded albedo values (unitless) as inputfileAlbedoTimeSeries and a time series of gridded longwave flux (W/m$^2$) as inputfileLongwaveFluxTimeSeries. Both files are optional and if not specified, the respective effect on the acceleration is not computed. The factorEarthRadation can be used to scale the computed acceleration of the earth radiation.
The thermal radiation (TRP) of the satellite itself is either computed as direct re-emission or based on the actual temperature of the satellite surfaces, depending on the settings of the satellite macro model. The second one uses a transient temperature model with a temporal differential equation which disallows parallel computing. The factorThermalRadiation can be used to scale the computed acceleration of the TRP.
The algorithms are described in:
Woeske et al. (2019), GRACE accelerometer calibration by high precision non-gravitational force modeling, Advances in Space Research, https://doi.org/10.1016/j.asr.2018.10.025.
AtmosphericDrag
Atmospheric drag model. Algorithm for the atmospheric drag modelling is based on the free molecule flow theory by Sentman 1961. An analytical expression of this treatise is given in Moe and Moe 2005.
Sentman L. (1961), Free molecule flow theory and its application to the determination of aerodynamic forces, Technical report.
Moe K., Moe M. M. (2005), Gas-surface interactions and satellite drag coefficients, Planetary and Space Science 53(8), 793-801, doi:10.1016/j.pss.2005.03.005.
Optional determination steps: Turn temperature on or off. In the first case, the model mentioned above is applied, which estimates variable drag and lift coefficients - in the latter case a constant drag coefficient can be specified.
Turn wind on/off: It enables the usage of the Horizontal Wind Model 2014 to add additional thermospheric winds in the calculation process.
AtmosphericDragFromDensityFile
Atmospheric drag computed from thermospheric density along the orbit (inputfileDensity, MISCVALUE). The thermosphere is used to compute temperature and wind. For further details see atmosphericDrag.
Antenna thrust
The thrust (acceleration) in the opposite direction the antenna is facing which is generated by satellite antenna broadcasts. The thrust is defined in the satellite macro model.
FromParametrization
Reads a solution vector from file inputfileSolution which may be computed by a least squares adjustment (e.g. by NormalsSolverVCE). The coefficients of the vector are interpreted from position indexStart (counting from zero) with help of parametrization. If the solution file contains solution of several right hand sides you can choose one with number rightSide (counting from zero).
DEPRECATED since 2022-12-19. Use radiationPressure instead.
Albedo
DEPRECATED since 2022-12-19. Use radiationPressure instead.'},
'noiseGeneratorType': { 'name': 'noiseGeneratorType', 'key': 'noiseGeneratorType', 'description': 'This class implements the generation of different types of noise. It provides a generic interface that can be implemented by different types of generators. The characteristics of the generated noise is determined by the generators. See the appropriate documentation for more information. The noise is Gaussian with a standard deviation sigma . The noise is computed via a pseudo random sequence with a start value given by initRandom . The same value always yields the same sequence. Be careful in parallel mode as all nodes generates the same pseudo random sequence. If this value is set to zero a real random value is used as starting value. This generator creates noise defined by a one sided PSD. The psd is an expression controlled by the variable \'freq\'. To determine the frequency sampling must be given. Generated noise is filtered by a . This generator creates noise that conforms to a power law relationship, where the power of the noise at a frequency is proportional to , with a typically between -2 and 2.', 'config_table': 'sigma double standard deviation initRandom uint start value for pseudo random sequence, 0: real random noise noiseGeneratorType Basis noise psd expression one sided PSD (variable: freq [Hz]) [unit^2/Hz] sampling double to determine frequency [seconds] filter digitalFilterType digital filter noise noiseGeneratorType Basis noise warmupEpochCount uint number of additional epochs at before start and after end overSamplingFactor uint noise with multiple higher sampling -> filter -> decimate noise noiseGeneratorType Basis noise alpha double Exponent of the power law relationship 1/f^alpha', 'display_text': 'This class implements the generation of different types of noise. It provides a generic interface that can be implemented by different types of generators. The characteristics of the generated noise is determined by the generators. 
See the appropriate documentation for more information.
White
The noise is Gaussian with a standard deviation sigma. The noise is computed via a pseudo random sequence with a start value given by initRandom. The same value always yields the same sequence. Be careful in parallel mode as all nodes generate the same pseudo random sequence. If this value is set to zero a real random value is used as starting value.
ExpressionPSD
This generator creates noise defined by a one sided PSD. The psd is an expression controlled by the variable \'freq\'. To determine the frequency sampling must be given.
This generator creates noise that conforms to a power law relationship, where the power of the noise at a frequency $f$ is proportional to $1/f^\\alpha$, with $\\alpha$ typically between -2 and 2.'},
'normalEquationType': { 'name': 'normalEquationType', 'key': 'normalEquationType', 'description': 'This class provides a system of normal equations. This total system is the weighted sum of individual normals. The normals do not need to have the same dimension. The dimension of the total combined system is chosen to cover all individual systems. For each normal a startIndex is required which indicates the position of the first unknown of the individual normal within the combined parameter vector. The of the relative weights are defined by aprioriSigma in a first step. If an apriori solution is given or the normals are solved iteratively the weights are determined by means of variance compoment estimation (VCE), see NormalsSolverVCE : where is the number of observations. The square sum of the residuals is calculated by The system of normal equations can be solved with several right hand sides at once. But only one right hand side, which can be selected with the index rightHandSide (counting from zero), can be used to compute the variance factors. The combined normal and the solution are taken from the previous iteration step. In case of the algorithm is a little bit different as described below. This class acculumates normal equations from observation equations. The class computes the linearized and decorrelated equation system for each arc : The arc depending parameters are eliminated and the system of normal equations is acculumated according to This class acculumates normal equations from observation equations. The class computes the linearized and decorrelated equation system for each arc : The arc depending parameters are eliminated and the system of normal equations is acculumated according to The variance of each individual arc is determined by where is the number of observations. If an apriori solution is not given at the first iteration step a zero vector is assumed. Reads a system of normal equations from file as generated by e.g. NormalsBuild . 
Set up a system of normal equations where is a diagonal matrix whose elements are given as a vector by and is the right hand side towards which will be regularized. It can be given by . The diagonal matrix can be generated with NormalsRegularizationBorders , NormalsRegularizationSphericalHarmonics , or MatrixCalculate . If is not given a unit matrix is assumed. The right hand side may be generated with Gravityfield2SphericalHarmonicsVector . If is not given a zero vector is assumed. Generalized regularization which is represented by the observation equation There are no requirements for partial covariance matrices except for them being symmetric. The accumulated covariance matrix must be positive definite however. The variance components are estimated during the adjustment process and are assumed to be positive. All must be of same size and must match the dimension of (if provided, otherwise a zero vector of appropriate dimensions is created). The parameter aprioriSigma determines the initial variance factor for the partial covariance matrices. Either one can be supplied or one for each . 
The regularization matrix can be applied to a subset of parameters by adjusting startIndex .', 'config_table': 'observation observationType aprioriSigma double startIndex uint add this normals at index of total matrix (counting from 0) inputfileArcList filename to accelerate computation observation observationType startIndex uint add this normals at index of total matrix (counting from 0) inputfileArcList filename to accelerate computation inputfileNormalEquation filename aprioriSigma double startIndex uint add this normals at index of total matrix (counting from 0) inputfileDiagonalMatrix filename Vector with the diagonal elements of the weight matrix inputfileBias filename Matrix with right hand sides aprioriSigma double startIndex uint regularization of parameters starts at this index (counting from 0) inputfilePartialCovarianceMatrix filename symmetric matrix (sum of all matrices must be positive definite) inputfileBiasMatrix filename bias vector (default: zero vector) aprioriSigma double apriori sigmas for initial iteration (default: 1.0) startIndex uint regularization of parameters starts at this index (counting from 0)', 'display_text': 'This class provides a system of normal equations. This total system is the weighted sum of individual normals. \\[ \\M N_{total} = \\sum_{k=1} \\frac{1}{\\sigma_k^2}\\M N_k \\qquad\\text{and}\\qquad \\M n_{total} = \\sum_{k=1} \\frac{1}{\\sigma_k^2} \\M n_k. \\]The normals do not need to have the same dimension. The dimension of the total combined system is chosen to cover all individual systems. For each normal a startIndex is required which indicates the position of the first unknown of the individual normal within the combined parameter vector.
The $\\sigma_k$ of the relative weights are defined by aprioriSigma in a first step. If an apriori solution inputfileApproxSolution is given or the normals are solved iteratively the weights are determined by means of variance component estimation (VCE), see NormalsSolverVCE: \\[ \\sigma_k^2 = \\frac{\\M e_k^T\\M P\\M e_k} {n_k-\\frac{1}{\\sigma_k^2}\\text{trace}\\left(\\M N_k\\M N_{total}^{-1}\\right)}, \\]where $n_k$ is the number of observations. The square sum of the residuals is calculated by \\[ \\M e_k^T\\M P\\M e_k = \\M x^T\\M N_k\\M x - 2\\M n_k^T\\M x + \\M l_k^T\\M P_k\\M l_k. \\]The system of normal equations can be solved with several right hand sides at once. But only one right hand side, which can be selected with the index rightHandSide (counting from zero), can be used to compute the variance factors. The combined normal $\\M N_{total}$ and the solution $\\M x$ are taken from the previous iteration step. In case of DesignVCE the algorithm is a little bit different as described below.
Design
This class accumulates normal equations from observation equations. The class observation computes the linearized and decorrelated equation system for each arc $i$: \\[ \\M l_i = \\M A_i \\M x + \\M B_i \\M y_i + \\M e_i. \\]The arc-dependent parameters $\\M y_i$ are eliminated and the system of normal equations is accumulated according to \\[ \\M N = \\sum_{i=1}^m \\M A_i^T \\M A_i \\qquad\\text{and}\\qquad \\M n = \\sum_{i=1}^m \\M A_i^T \\M l_i. \\]
DesignVCE
This class accumulates normal equations from observation equations. The class observation computes the linearized and decorrelated equation system for each arc $i$: \\[ \\M l_i = \\M A_i \\M x + \\M B_i \\M y_i + \\M e_i. \\]The arc-dependent parameters $\\M y_i$ are eliminated and the system of normal equations is accumulated according to \\[ \\M N = \\sum_{i=1} \\frac{1}{\\sigma_i^2}\\M A_i^T \\M A_i \\qquad\\text{and}\\qquad \\M n = \\sum_{i=1} \\frac{1}{\\sigma_i^2} \\M A_i^T \\M l_i. \\]The variance $\\sigma_i^2$ of each individual arc is determined by \\[ \\sigma_i^2 = \\frac{(\\M l_i-\\M A_i\\M x)^T(\\M l_i-\\M A_i\\M x)} {n_i-\\frac{1}{\\sigma_i^2}\\text{trace}\\left(\\M A_i^T \\M A_i\\M N_{total}^{-1}\\right)}, \\]where $n_i$ is the number of observations. If an apriori solution is not given at the first iteration step a zero vector is assumed.
Generalized regularization which is represented by the observation equation \\[ \\mathbf{x}_0 = \\mathbf{I} \\mathbf{x} + \\mathbf{v}, \\mathbf{v} \\sim \\mathcal{N}(0, \\sum_k \\sigma^2_k \\mathbf{V}_k). \\] There are no requirements for partial covariance matrices $\\mathbf{V}_k$ except for them being symmetric. The accumulated covariance matrix $\\sum_k \\sigma^2_k \\mathbf{V}_k$ must be positive definite however. The variance components $\\sigma^2_k$ are estimated during the adjustment process and are assumed to be positive. All inputfilePartialCovarianceMatrix must be of same size and must match the dimension of inputfileBiasMatrix (if provided, otherwise a zero vector of appropriate dimensions is created).
The parameter aprioriSigma determines the initial variance factor for the partial covariance matrices. Either one $\\sigma_0$ can be supplied or one for each $\\mathbf{V}_k$.
The regularization matrix can be applied to a subset of parameters by adjusting startIndex.'},
'observationType': { 'name': 'observationType', 'key': 'observationType', 'description': 'This class sets up the observation equations in linearized Gauss-Markov model The observations are divided into short data blocks which can be computed independently and so easily can be parallelized. Usually these data blocks are short arcs of a satellite\'s orbit. In most cases the unknown parameter vector contains coefficients of a gravity field parametrization given by . Additional parameters like instrument calibration parameters are appended at the end of the vector . It is possible to give several observation vectors in one model. The observations within each arc are decorrelated in the following way: In a first step a Cholesky decomposition of the covariance matrix is performed where is an upper regular triangular matrix. In a second step the transformation gives an estimation from decorrelated observations with equal variance Usually the arc dependent parameters are eliminated in the next step and not mentioned for the parameter names in the following. The observation equations for precise orbit data (POD) are formulated as variational equations. It is based on inputfileVariational calculated with PreprocessingVariationalEquation . Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree . The kinematic positions as pseudo observations are taken from rightHandSide and should not be given equally spaced in time. The observation equations are interpolated to these times by a moving polynomial of degree interpolationDegree . The accuracy or the full covariance matrix of the precise orbit data is provided in and can be estimated with PreprocessingPod . accelerateComputation : In the event that the sampling of the kinematic orbit is much higher than the sampling of the variational equations (e.g. 1 second vs. 
5 seconds) the accumulation of the observation equations can be accelerated by transforming the observation equations where describes the interpolation of the sampling of the variational design matrix to the sampling of the observations with more rows than columns. The QR decomposition can be used to transform the observation equations As the zero lines should not be considered, the computational time for the accumulation is reduced. This option is not meaningful for evaluating the residuals such as in PreprocessingPod . The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , <satellite>:<parametrizationAcceleration>:*:* , <satellite>:arc<no>.<parametrizationAcceleration>:*:* , <satellite>:arc<no>.position0.x:: , <satellite>:arc<no>.position0.y:: , <satellite>:arc<no>.position0.z:: . <satellite>:arc<no>.velocity0.x:: , <satellite>:arc<no>.velocity0.y:: , <satellite>:arc<no>.velocity0.z:: . The observation equations for precise orbit data (POD) of short arcs are given by with the integral kernel and the normalized time The kinematic positions as pseudo observations are taken from . From these positions the influence of the reference forces is subtracted which are computed with the background models in . The integral is solved by the integration of a moving interpolation polynomial of degree integrationDegree . The boundary values and (satellite\'s state vector) are estimated per arc and are usually directly eliminated if keepSatelliteStates is not set. The unknown gravity field parametrized by is not evaluated at the observed positions but at the orbit given by . The same is true for the reference forces. The linearized effect of the gravity field change by the position adjustment is taken into account by gradientfield . This may be a low order field up to a spherical harmonics degree of or . 
The , , and must be synchronous and must be given with a constant sampling and without any gaps in each short arc (see InstrumentSynchronize ). The kinematic positions should not given equally spaced in time but must be divided into the same arcs as the other instrument data. The observation equations are interpolated to this time by a polynomial interpolation with degree interpolationDegree . The accuracy or the full covariance matrix of the precise orbit data is provided in and can be estimated with PreprocessingPod . For accelerateComputation see . The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , <satellite>:<parametrizationAcceleration>:*:* , and for each arc if keepSatelliteStates is set <satellite>:arc<no>.position.start.x:: , <satellite>:arc<no>.position.start.y:: , <satellite>:arc<no>.position.start.z:: . <satellite>:arc<no>.position.end.x:: , <satellite>:arc<no>.position.end.y:: , <satellite>:arc<no>.position.end.z:: . The observation equations for precise orbit data (POD) are given by where the accelerations of the satellite are derived from the kinematic positions in . The orbit differentation is performed by a moving polynomial interpolation or approximation with degree interpolationDegree and number of used epochs numberOfEpochs . The reference forces are computed with the background models in . All instrument data , , and must be synchronous and be given with a constant sampling without any gaps in each short arc (see InstrumentSynchronize ). The unknown gravity field parametrized by is not evaluated at the observed positions but at the orbit given by . The same is true for the reference forces. This orbit may be a more accurate dynamical orbit but in most cases the kinematic orbit provides good results. The accuracy or the full covariance matrix of the precise orbit data is provided in and can be estimated with PreprocessingPod . 
The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , <satellite>:<parametrizationAcceleration>:*:* . The observation equations for precise orbit data (POD) are given by where the velocities of the satellite are derived from the kinematic positions in and the Earth\'s rotation vector is modeled within . The orbit differentiation is performed by a polynomial interpolation with degree interpolationDegree . The integrals are solved by a polynomial interpolation with degree integrationDegree . The reference forces are computed with the background models in . All instrument data , , and must be synchronous and be given with a constant sampling without any gaps in each short arc (see InstrumentSynchronize ). The unknown gravity potential parametrized by is not evaluated at the observed positions but at the orbit given by . The same is true for the reference forces. This orbit may be a more accurate dynamical orbit but in most cases the kinematic orbit provides good results. An unknown energy bias per arc is parametrized by and should be a constant in theory but temporal changes might help to absorb other unmodelled effects. The accuracy or the full covariance matrix of the precise orbit data is provided in and can be estimated with PreprocessingPod . The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* . Like (see there for details) but with two satellites and additional satellite-to-satellite (SST) observations. If multiple are given, all data are added together. So corrections in extra files like the light time correction can easily be added. Empirical parameters for the SST observations can be set up with . The accuracy or the full covariance matrix of SST is provided in . 
The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , <satellite1>:<parametrizationAcceleration>:*:* , <satellite1>:arc<no>.<parametrizationAcceleration>:*:* , <satellite1>:arc<no>.position0.x:: , <satellite1>:arc<no>.position0.y:: , <satellite1>:arc<no>.position0.z:: . <satellite1>:arc<no>.velocity0.x:: , <satellite1>:arc<no>.velocity0.y:: , <satellite1>:arc<no>.velocity0.z:: . <satellite2>:<parametrizationAcceleration>:*:* , <satellite2>:arc<no>.<parametrizationAcceleration>:*:* , <satellite2>:arc<no>.position0.x:: , <satellite2>:arc<no>.position0.y:: , <satellite2>:arc<no>.position0.z:: . <satellite2>:arc<no>.velocity0.x:: , <satellite2>:arc<no>.velocity0.y:: , <satellite2>:arc<no>.velocity0.z:: . <satellite1>.<satellite2>:<parametrizationSatelliteTracking>:*:* . Like (see there for details) but with two satellites and additional satellite-to-satellite (SST) observations. If multiple are given all data are added together. So corrections in extra files like the light time correction can easily be added. Empirical parameters for the SST observations can be set up with . The accuracy or the full covariance matrix of SST is provided in . The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , <satellite1>:<parametrizationAcceleration>:*:* , <satellite2>:<parametrizationAcceleration>:*:* , <satellite1>.<satellite2>:<parametrizationSatelliteTracking>:*:* , and for each arc if keepSatelliteStates is set <satellite1>:arc<no>.position.start.x:: , <satellite1>:arc<no>.position.start.y:: , <satellite1>:arc<no>.position.start.z:: . <satellite1>:arc<no>.position.end.x:: , <satellite1>:arc<no>.position.end.y:: , <satellite1>:arc<no>.position.end.z:: . <satellite2>:arc<no>.position.start.x:: , <satellite2>:arc<no>.position.start.y:: , <satellite2>:arc<no>.position.start.z:: . <satellite2>:arc<no>.position.end.x:: , <satellite2>:arc<no>.position.end.y:: , <satellite2>:arc<no>.position.end.z:: . 
Like (see there for details) but with two simultaneous satellite-to-satellite (SST) observations. This class reads two SST observation files ( and ). Empirical parameters for the SST observations can be set up independently for both SST observation types with and . Both SST observation types are reduced by the same background models and the same impact of accelerometer measurements. The covariance matrix of the reduced observations should not consider the instrument noise only ( ) but must take the cross correlations into account. The covariance matrix of the reduced observations is given by The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , <satellite1>:<parametrizationAcceleration>:*:* , <satellite1>:arc<no>.<parametrizationAcceleration>:*:* , <satellite1>:arc<no>.position0.x:: , <satellite1>:arc<no>.position0.y:: , <satellite1>:arc<no>.position0.z:: . <satellite1>:arc<no>.velocity0.x:: , <satellite1>:arc<no>.velocity0.y:: , <satellite1>:arc<no>.velocity0.z:: . <satellite2>:<parametrizationAcceleration>:*:* , <satellite2>:arc<no>.<parametrizationAcceleration>:*:* , <satellite2>:arc<no>.position0.x:: , <satellite2>:arc<no>.position0.y:: , <satellite2>:arc<no>.position0.z:: . <satellite2>:arc<no>.velocity0.x:: , <satellite2>:arc<no>.velocity0.y:: , <satellite2>:arc<no>.velocity0.z:: . <satellite1>.<satellite2>:<parametrizationSatelliteTracking1>:*:* . <satellite1>.<satellite2>:<parametrizationSatelliteTracking2>:*:* . Observation equations for satellite gravity gradiometry (SGG) From the observations precomputed together with other background models are reduced, all given in . All instrument data , , and must be synchronous and be diveded into each short arcs (see InstrumentSynchronize ). Additional to the an (temporal changing) bias for each gradiometer component and arc can be estimated with . 
The accuracy or the full covariance matrix of the gradiometer is provided in covarianceSgg and can be estimated with PreprocessingGradiometer . The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* . The gravity field is estimated from point wise measurements. The gravity field parametrization is given by . There is no need to have the data regular distributed or given on a sphere or ellipsoid. The type of the gridded data (e.g gravity anomalies or geoid heights) must be set with . A can be reduced beforehand. The observations at given positions are calculated from . The input columns are enumerated by data0 , data1 , , see dataVariables . The observations can be divided into small blocks for parallelization. With blockingSize set the maximum count of observations in each block. The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* . The gravity field parametrized by is estimated from deflections of the vertical measurements. A can be reduced beforehand. The observations in north direction and in east direction at given positions are calculated from . The input columns are enumerated by data0 , data1 , , see dataVariables . The ellipsoid parameters R and inverseFlattening are used to define the local normal direction. The observations can be divided into small blocks for parallelization. With blockingSize set the maximum count of observations in each block. The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* . Observation equations for displacements of a list of stations due to the effect of time variable loading masses. The displacement of a station is calculated according to where is the normal gravity, the load Love and Shida numbers are given by and the load Love numbers are given by . 
The are the spherical harmonics expansion of degree of the full time variable gravitational potential (potential of the loading mass + deformation potential) parametrized by . Additional parameters can be setup to estimate the realization of the reference frame of the station coordinates ( estimateTranslation , estimateRotation , and estimateScale ). The observations at stations coordinates are calculated from . The input columns are enumerated by data0 , data1 , , see dataVariables . The ellipsoid parameters R and inverseFlattening are used to define the local frame (north, east, up). The following parameters with parameter names are set up: *:<parametrizationGravity>:*:* , *:translation.x:*:* , *:translation.y:*:* , *:translation.z:*:* , *:scale:*:* , *:rotation.x:*:* , *:rotation.y:*:* , *:rotation.z:*:* . See also Gravityfield2DisplacementTimeSeries . Reference: Rietbroek (2014): Retrieval of Sea Level and Surface Loading Variations from Geodetic Observations and Model Simulations: an Integrated Approach, Bonn, 2014. 
- Dissertation, https://nbn-resolving.org/urn:nbn:de:hbz:5n-35460', 'config_table': 'rightHandSide sequence input for observation vectors inputfileOrbit filename kinematic positions as observations inputfileVariational filename approximate position and integrated state matrix ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration parametrizationAccelerationType orbit force parameters integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n accelerateComputation boolean acceleration of computation by transforming the observations covariancePod covariancePodType covariance matrix of kinematic orbits inputfileSatelliteModel filename satellite macro model rightHandSide podRightSideType input for the reduced observation vector inputfileOrbit filename used to evaluate the observation equations, not used as observations inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType gradientfield gravityfieldType low order field to estimate the change of the gravity by position adjustement parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration parametrizationAccelerationType orbit force parameters keepSatelliteStates boolean set boundary values of each arc global integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n accelerateComputation boolean acceleration of computation by transforming the observations covariancePod covariancePodType covariance matrix of kinematic orbits inputfileSatelliteModel filename satellite macro model rightHandSide podRightSideType input for the reduced observation vector inputfileOrbit filename used to evaluate the observation equations, not used as observations 
inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration parametrizationAccelerationType orbit force parameters interpolationDegree uint orbit differentation by polynomial approximation of degree n numberOfEpochs uint number of used Epochs for polynom computation covariancePod covariancePodType covariance matrix of kinematic orbits inputfileSatelliteModel filename satellite macro model rightHandSide podRightSideType input for the reduced observation vector inputfileOrbit filename used to evaluate the observation equations, not used as observations inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization (potential) parametrizationBias parametrizationTemporalType unknown total energy per arc interpolationDegree uint orbit differentiation by polynomial approximation of degree n integrationDegree uint integration of forces by polynomial approximation of degree n covariancePod covariancePodType covariance matrix of kinematic orbits rightHandSide sequence input for observation vectors inputfileSatelliteTracking filename ranging observations and corrections inputfileOrbit1 filename kinematic positions of satellite A as observations inputfileOrbit2 filename kinematic positions of satellite B as observations sstType choice range rangeRate none inputfileVariational1 filename approximate position and integrated state matrix inputfileVariational2 filename approximate position and integrated state matrix ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst 
parametrizationSatelliteTrackingType satellite tracking parameter integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n covarianceSst covarianceSstType covariance matrix of satellite to satellite tracking observations covariancePod1 covariancePodType covariance matrix of kinematic orbits (satellite 1) covariancePod2 covariancePodType covariance matrix of kinematic orbits (satellite 2) inputfileSatelliteModel1 filename satellite macro model inputfileSatelliteModel2 filename satellite macro model rightHandSide sstRightSideType input for the reduced observation vector sstType choice range rangeRate rangeAcceleration none inputfileOrbit1 filename used to evaluate the observation equations, not used as observations inputfileOrbit2 filename used to evaluate the observation equations, not used as observations inputfileStarCamera1 filename inputfileStarCamera2 filename earthRotation earthRotationType ephemerides ephemeridesType gradientfield gravityfieldType low order field to estimate the change of the gravity by position adjustement parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst parametrizationSatelliteTrackingType satellite tracking parameter keepSatelliteStates boolean set boundary values of each arc global integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n covarianceSst covarianceSstType covariance matrix of satellite to satellite tracking observations covariancePod1 covariancePodType covariance matrix of kinematic orbits (satellite 1) covariancePod2 covariancePodType covariance matrix of kinematic orbits (satellite 2) 
rightHandSide sequence input for observation vectors inputfileSatelliteTracking1 filename ranging observations and corrections inputfileSatelliteTracking2 filename ranging observations and corrections inputfileOrbit1 filename kinematic positions of satellite A as observations inputfileOrbit2 filename kinematic positions of satellite B as observations sstType choice range rangeRate none inputfileVariational1 filename approximate position and integrated state matrix inputfileVariational2 filename approximate position and integrated state matrix ephemerides ephemeridesType parametrizationGravity parametrizationGravityType gravity field parametrization parametrizationAcceleration1 parametrizationAccelerationType orbit1 force parameters parametrizationAcceleration2 parametrizationAccelerationType orbit2 force parameters parametrizationSst1 parametrizationSatelliteTrackingType satellite tracking parameter for first ranging observations parametrizationSst2 parametrizationSatelliteTrackingType satellite tracking parameter for second ranging observations integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint orbit interpolation by polynomial approximation of degree n covarianceSst1 covarianceSstType covariance matrix of first satellite to satellite tracking observations covarianceSst2 covarianceSstType covariance matrix of second satellite to satellite tracking observations covarianceAcc covarianceSstType common covariance matrix of reduced satellite to satellite tracking observations covariancePod1 covariancePodType covariance matrix of kinematic orbits (satellite 1) covariancePod2 covariancePodType covariance matrix of kinematic orbits (satellite 2) rightHandSide sggRightSideType input for the observation vector inputfileOrbit filename inputfileStarCamera filename earthRotation earthRotationType ephemerides ephemeridesType parametrizationGravity parametrizationGravityType parametrizationBias 
parametrizationTemporalType per arc useXX boolean useYY boolean useZZ boolean useXY boolean useXZ boolean useYZ boolean covarianceSgg sequence sigma double general variance factor inputfileSigmasPerArc filename different accuaries for each arc (multplicated with sigma) inputfileCovarianceFunction filename covariance function in time rightHandSide sequence input for observation vectors inputfileGriddedData filename observation expression [SI units] sigma expression accuracy, 1/sigma used as weighting referencefield gravityfieldType kernel kernelType type of observations parametrizationGravity parametrizationGravityType time time for reference field and parametrization blockingSize uint segementation of the obervations if designmatrix can\'t be build at once rightHandSide sequence input for observation vectors inputfileGriddedData filename observationXi expression North-South Deflections of the Vertical [rad] observationEta expression East-West Deflections of the Vertical [rad] sigmaXi expression accuracy, 1/sigma used as weighting sigmaEta expression accuracy, 1/sigma used as weighting referencefield gravityfieldType parametrizationGravity parametrizationGravityType time time for reference field and parametrization R double reference radius for ellipsoid inverseFlattening double reference flattening for ellipsoid, 0: sphere blockingSize uint segementation of the obervations if designmatrix can\'t be build at once rightHandSide sequence input for observation vectors inputfileGriddedData filename station positions with displacement data observationNorth expression displacement [m] observationEast expression displacement [m] observationUp expression displacement [m] sigmaNorth expression accuracy, 1/sigma used as weighting sigmaEast expression accuracy, 1/sigma used as weighting sigmaUp expression accuracy, 1/sigma used as weighting inGlobalFrame boolean obs/sigmas given in global x,y,z frame instead of north,east,up referencefield gravityfieldType time time for 
reference field and parametrization parametrizationGravity parametrizationGravityType of loading (+defo) potential estimateTranslation boolean coordinate center estimateScale boolean scale factor of position estimateRotation boolean rotation inputfileDeformationLoadLoveNumber filename inputfilePotentialLoadLoveNumber filename if full potential is given and not only loading potential R double reference radius for ellipsoid inverseFlattening double reference flattening for ellipsoid, 0: sphere', 'display_text': 'This class sets up the observation equations in linearized Gauss-Markov model \\[\\label{gmm} \\M l = \\M A \\M x + \\M e\\qquad\\text{and}\\qquad\\mathcal{C}(\\M e) = \\sigma^2\\M P^{-1}. \\]The observations are divided into short data blocks which can be computed independently and so easily can be parallelized. Usually these data blocks are short arcs of a satellite\'s orbit. In most cases the unknown parameter vector contains coefficients of a gravity field parametrization given by parametrizationGravity. Additional parameters like instrument calibration parameters are appended at the end of the vector $\\M x$. It is possible to give several observation vectors in one model.
The observations within each arc are decorrelated in the following way: In a first step a Cholesky decomposition of the covariance matrix is performed \\[ \\M P^{-1} = \\M W^T\\M W, \\]where $\\M W$ is an upper regular triangular matrix. In a second step the transformation \\[\\label{dekorrelierung} \\bar{\\M A} = \\M W^{-T}\\M A\\qquad\\text{and}\\qquad \\bar{\\M l} = \\M W^{-T}\\M l \\]gives an estimation from decorrelated observations with equal variance \\[\\label{normal.GMM} \\bar{\\M l} = \\bar{\\M A} \\M x + \\bar{\\M e} \\qquad\\text{and}\\qquad \\mathcal{C}(\\bar{\\M e})= \\sigma^2 \\M I. \\]Usually the arc dependent parameters are eliminated in the next step and not mentioned for the parameter names in the following.
PodVariational
The observation equations for precise orbit data (POD) are formulated as variational equations. It is based on inputfileVariational calculated with PreprocessingVariationalEquation. Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree.
The kinematic positions as pseudo observations are taken from rightHandSide and should not be given equally spaced in time. The observation equations are interpolated to these times by a moving polynomial of degree interpolationDegree.
The accuracy or the full covariance matrix of the precise orbit data is provided in covariancePod and can be estimated with PreprocessingPod.
accelerateComputation: In the event that the sampling of the kinematic orbit is much higher than the sampling of the variational equations (e.g. 1 second vs. 5 seconds) the accumulation of the observation equations can be accelerated by transforming the observation equations \\[ \\M l = \\M J \\M A \\M x + \\M e, \\]where $\\M J$ describes the interpolation of the sampling of the variational design matrix $\\M A$ to the sampling of the observations $\\M l$ with more rows than columns. The QR decomposition \\[ \\M J = \\begin{pmatrix} \\M Q_1 & \\M Q_2 \\end{pmatrix} \\begin{pmatrix} \\M R \\\\ \\M 0 \\end{pmatrix}. \\]can be used to transform the observation equations \\[ \\begin{pmatrix} \\M Q_1^T \\M l \\\\ \\M Q_2^T \\M l \\end{pmatrix} = \\begin{pmatrix} \\M Q_1^T \\M R \\\\ \\M 0 \\end{pmatrix} \\M A \\M x + \\begin{pmatrix} \\M Q_1^T \\M e \\\\ \\M Q_2^T \\M e \\end{pmatrix}. \\]As the zero lines should not be considered, the computational time for the accumulation is reduced. This option is not meaningful for evaluating the residuals such as in PreprocessingPod.
The observation equations for precise orbit data (POD) of short arcs are given by \\[ {\\M r}_\\epsilon(\\tau) = {\\M r}_A(1-\\tau) + {\\M r}_B\\tau - T^2\\int_0^1 K(\\tau,\\tau\') \\left(\\M f_0(\\tau\')+\\nabla V(\\tau\')\\right)\\,d\\tau\' \\]with the integral kernel \\[ K(\\tau,\\tau\') = \\begin{cases} \\tau\'(1-\\tau) & \\text{for }\\tau\'\\le\\tau \\\\ \\tau(1-\\tau\') & \\text{for }\\tau\'>\\tau \\end{cases}, \\]and the normalized time \\[ \\tau = \\frac{t-t_A}{T}\\qquad\\text{with}\\qquad T=t_B-t_A. \\]The kinematic positions ${\\M r}_\\epsilon(\\tau)$ as pseudo observations are taken from rightHandSide. From these positions the influence of the reference forces $\\M f_0(\\tau)$ is subtracted which are computed with the background models in rightHandSide. The integral is solved by the integration of a moving interpolation polynomial of degree integrationDegree. The boundary values ${\\M r}_A$ and ${\\M r}_B$ (satellite\'s state vector) are estimated per arc and are usually directly eliminated if keepSatelliteStates is not set.
The unknown gravity field $\\nabla V(\\M r, t)$ parametrized by parametrizationGravity is not evaluated at the observed positions but at the orbit given by inputfileOrbit. The same is true for the reference forces. The linearized effect of the gravity field change by the position adjustment is taken into account by gradientfield. This may be a low order field up to a spherical harmonics degree of $n=2$ or $n=3$.
The inputfileOrbit, inputfileStarCamera, and inputfileAccelerometer must be synchronous and must be given with a constant sampling and without any gaps in each short arc (see InstrumentSynchronize). The kinematic positions ${\\M r}_\\epsilon(\\tau)$ should not be given equally spaced in time but must be divided into the same arcs as the other instrument data. The observation equations are interpolated to this time by a polynomial interpolation with degree interpolationDegree.
The accuracy or the full covariance matrix of the precise orbit data is provided in covariancePod and can be estimated with PreprocessingPod.
The observation equations for precise orbit data (POD) are given by \\[ \\ddot{\\M r}(t) - \\M g_0(t) = \\nabla V(\\M r, t), \\]where the accelerations of the satellite $\\ddot{\\M r}(t)$ are derived from the kinematic positions in rightHandSide. The orbit differentiation is performed by a moving polynomial interpolation or approximation with degree interpolationDegree and number of used epochs numberOfEpochs. The reference forces $\\M g_0(t)$ are computed with the background models in rightHandSide.
The unknown gravity field $\\nabla V(\\M r, t)$ parametrized by parametrizationGravity is not evaluated at the observed positions but at the orbit given by inputfileOrbit. The same is true for the reference forces. This orbit may be a more accurate dynamical orbit but in most cases the kinematic orbit provides good results.
The accuracy or the full covariance matrix of the precise orbit data is provided in covariancePod and can be estimated with PreprocessingPod.
The observation equations for precise orbit data (POD) are given by \\[ \\frac{1}{2}\\dot{\\M r}^2 -\\dot{\\M r} \\cdot (\\M\\Omega\\times\\M r) +\\int_{t_0}^t(\\dot{\\M\\Omega}\\times\\M r)\\cdot \\dot{\\M r}\\,dt - \\int_{t_0}^t \\M g_0 \\cdot\\dot{\\M r}\'\\,dt = V + E. \\]where the velocities of the satellite $\\dot{\\M r}(t)$ are derived from the kinematic positions in rightHandSide and the Earth\'s rotation vector $\\M\\Omega(t)$ is modeled within earthRotation. The orbit differentiation is performed by a polynomial interpolation with degree interpolationDegree. The integrals are solved by a polynomial interpolation with degree integrationDegree. The reference forces $\\M g_0(t)$ are computed with the background models in rightHandSide.
The unknown gravity potential $V(\\M r)$ parametrized by parametrizationGravity is not evaluated at the observed positions but at the orbit given by inputfileOrbit. The same is true for the reference forces. This orbit may be a more accurate dynamical orbit but in most cases the kinematic orbit provides good results.
An unknown energy bias $E$ per arc is parametrized by parametrizationBias and should be a constant in theory but temporal changes might help to absorb other unmodelled effects.
The accuracy or the full covariance matrix of the precise orbit data is provided in covariancePod and can be estimated with PreprocessingPod.
The following parameters with parameter names are set up: *:<parametrizationGravity>:*:*.
SstVariational
Like observation:podVariational (see there for details) but with two satellites and additional satellite-to-satellite (SST) observations.
If multiple inputfileSatelliteTracking are given, all data are added together. So corrections in extra files like the light time correction can easily be added. Empirical parameters for the SST observations can be set up with parametrizationSst. The accuracy or the full covariance matrix of SST is provided in covarianceSst.
Like observation:podIntegral (see there for details) but with two satellites and additional satellite-to-satellite (SST) observations.
If multiple inputfileSatelliteTracking are given all data are added together. So corrections in extra files like the light time correction can easily be added. Empirical parameters for the SST observations can be set up with parametrizationSst. The accuracy or the full covariance matrix of SST is provided in covarianceSst.
Both SST observation types are reduced by the same background models and the same impact of accelerometer measurements. The covariance matrix of the reduced observations should not consider the instrument noise only (covarianceSst1/2) but must take the cross correlations covarianceAcc into account. The covariance matrix of the reduced observations is given by \\[ \\M\\Sigma(\\begin{bmatrix} \\Delta l_{SST1} \\\\ \\Delta l_{SST2} \\end{bmatrix}) = \\begin{bmatrix} \\M\\Sigma_{SST1} + \\M\\Sigma_{ACC} & \\M\\Sigma_{ACC} \\\\ \\M\\Sigma_{ACC} & \\M\\Sigma_{SST2} + \\M\\Sigma_{ACC} \\end{bmatrix}. \\] The following parameters with parameter names are set up:
The accuracy or the full covariance matrix of the gradiometer is provided in covarianceSgg and can be estimated with PreprocessingGradiometer.
The following parameters with parameter names are set up: *:<parametrizationGravity>:*:*.
Terrestrial
The gravity field is estimated from pointwise measurements. The gravity field parametrization is given by parametrizationGravity. There is no need to have the data regularly distributed or given on a sphere or ellipsoid. The type of the gridded data (e.g. gravity anomalies or geoid heights) must be set with kernel. A referencefield can be reduced beforehand.
The observations at given positions are calculated from inputfileGriddedData. The input columns are enumerated by data0, data1, ..., see dataVariables.
The observations can be divided into small blocks for parallelization. With blockingSize, set the maximum count of observations in each block.
The following parameters with parameter names are set up: *:<parametrizationGravity>:*:*.
Deflections
The gravity field parametrized by parametrizationGravity is estimated from deflections of the vertical measurements. A referencefield can be reduced beforehand.
The observations $\\xi$ in north direction and $\\eta$ in east direction at given positions are calculated from inputfileGriddedData. The input columns are enumerated by data0, data1, ..., see dataVariables.
The ellipsoid parameters R and inverseFlattening are used to define the local normal direction.
The observations can be divided into small blocks for parallelization. With blockingSize, set the maximum count of observations in each block.
The following parameters with parameter names are set up: *:<parametrizationGravity>:*:*.
StationLoading
Observation equations for displacements of a list of stations due to the effect of time variable loading masses. The displacement $\\M u$ of a station is calculated according to \\[ \\M u(\\M r) = \\frac{1}{\\gamma}\\sum_{n=0}^\\infty \\left[\\frac{h_n}{1+k_n}V_n(\\M r)\\,\\M e_{up} + R\\frac{l_n}{1+k_n}\\left( \\frac{\\partial V_n(\\M r)}{\\partial \\M e_{north}}\\M e_{north} +\\frac{\\partial V_n(\\M r)}{\\partial \\M e_{east}} \\M e_{east}\\right)\\right], \\]where $\\gamma$ is the normal gravity, the load Love and Shida numbers $h_n,l_n$ are given by inputfileDeformationLoadLoveNumber and the load Love numbers $k_n$ are given by inputfilePotentialLoadLoveNumber. The $V_n$ are the spherical harmonics expansion of degree $n$ of the full time variable gravitational potential (potential of the loading mass + deformation potential) parametrized by parametrizationGravity. Additional parameters can be setup to estimate the realization of the reference frame of the station coordinates (estimateTranslation, estimateRotation, and estimateScale).
The observations at station coordinates are calculated from inputfileGriddedData. The input columns are enumerated by data0, data1, ..., see dataVariables.
The ellipsoid parameters R and inverseFlattening are used to define the local frame (north, east, up).
Reference: Rietbroek (2014): Retrieval of Sea Level and Surface Loading Variations from Geodetic Observations and Model Simulations: an Integrated Approach, Bonn, 2014. - Dissertation, https://nbn-resolving.org/urn:nbn:de:hbz:5n-35460'},
'orbitPropagatorType': { 'name': 'orbitPropagatorType', 'key': 'orbitPropagatorType', 'description': 'Implements the propagation of a satellite orbit under the influence of as used in SimulateOrbit (dynamic orbits from numerical orbit integration). This class implements Euler\'s method to propagate a satellite orbit under the influence of . Satellite is assumed to be oriented along-track. This class implements the classical Runge-Kutta 4 method of orbit propagation for satellite orbit under the influence of . No step-width control or other advanced features are implemented. Satellite is assumed to be oriented along-track. See: Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits This class implements the Adams-Moulton class of predictor-corrector orbit propagators for a satellite orbit under the influence of using an implicit Adams-Bashforth corrector. The coefficients for the propagator are derived using the equations given in section 4.2.3 of [1]. Satellite is assumed to be oriented along-track. [1] Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits This class implements the Stoermer-Cowell class of predictor-corrector orbit propagators for a satellite orbit under the influence of . The coefficients for the Stoermer predictor and Cowell corrector are derived using the equations given in section 4.2.6 of [1]. Stoermer-Cowell is a double integration algorithm, yielding positions directly from accelertions. It does not produce velocities. The velocities are derived using Adams-type propagators as suggested in [2]. Satellite is assumed to be oriented along-track. [1] Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits [2] Berry, Matthew M., and Liam M. Healy. 2004. “Implementation of Gauss-Jackson Integration for Orbit Propagation.” This class implements the Gauss-Jackson multi-step predictor-corrector method to propagate a satellite orbit under the influence of . Satellite is assumed to be oriented along-track. 
Implementation is based on [1]. [1] Berry, Matthew M., and Liam M. Healy. 2004. “Implementation of Gauss-Jackson Integration for Orbit Propagation.” This class implements an integration Polynomial method to propagate a satellite orbit under the influence of . Satellite is assumed to be oriented along-track. Implementation is based on code by Torsten Mayer-Gürr. Reads an orbit from file. If the needed epochs are not given an exception is thrown.', 'config_table': 'order uint Order of the Adams-Bashforth type propagator. applyMoultonCorrector boolean Corrector step after Adams-Bashforth predcition. warmup orbitPropagatorType order uint Order of the Stoermer-Cowell type propagator. warmup orbitPropagatorType order uint of Gauss-Jackson method. warmup orbitPropagatorType correctorIterations uint Maximum number of iterations to run the corrector step for. epsilon double Convergence criteria for position, velocity, and acceleration tests. degree uint polynomial degree to integrate accelerations shift int shift polynomial in future (predicted accelerations) epsilon double [m] max. position change to recompute forces warmup orbitPropagatorType to compute epochs before start epoch corrector boolean apply corrector iteration if position change is larger than epsilon inputfileOrbit filename epoch at timeStart is not used margin double [seconds] to find identical times recomputeForces boolean', 'display_text': 'Implements the propagation of a satellite orbit under the influence of forces as used in SimulateOrbit (dynamic orbits from numerical orbit integration).
Euler
This class implements Euler\'s method to propagate a satellite orbit under the influence of Forces. Satellite is assumed to be oriented along-track.
RungeKutta4
This class implements the classical Runge-Kutta 4 method of orbit propagation for satellite orbit under the influence of Forces. No step-width control or other advanced features are implemented. Satellite is assumed to be oriented along-track. See: Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits
AdamsBashforthMoulton
This class implements the Adams-Moulton class of predictor-corrector orbit propagators for a satellite orbit under the influence of Forces using an implicit Adams-Moulton corrector. The coefficients for the propagator are derived using the equations given in section 4.2.3 of [1]. Satellite is assumed to be oriented along-track. [1] Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits
StoermerCowell
This class implements the Stoermer-Cowell class of predictor-corrector orbit propagators for a satellite orbit under the influence of Forces. The coefficients for the Stoermer predictor and Cowell corrector are derived using the equations given in section 4.2.6 of [1]. Stoermer-Cowell is a double integration algorithm, yielding positions directly from accelerations. It does not produce velocities. The velocities are derived using Adams-type propagators as suggested in [2]. Satellite is assumed to be oriented along-track. [1] Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits [2] Berry, Matthew M., and Liam M. Healy. 2004. “Implementation of Gauss-Jackson Integration for Orbit Propagation.”
GaussJackson
This class implements the Gauss-Jackson multi-step predictor-corrector method to propagate a satellite orbit under the influence of Forces. Satellite is assumed to be oriented along-track. Implementation is based on [1]. [1] Berry, Matthew M., and Liam M. Healy. 2004. “Implementation of Gauss-Jackson Integration for Orbit Propagation.”
Polynomial
This class implements an integration Polynomial method to propagate a satellite orbit under the influence of Forces. Satellite is assumed to be oriented along-track. Implementation is based on code by Torsten Mayer-Gürr.
File
Reads an orbit from file. If the needed epochs are not given an exception is thrown.'},
'parameterNamesType': { 'name': 'parameterNamesType', 'key': 'parameterNamesType', 'description': 'Generates a list of parameter names. All parameters are appended. The parameter is given explicitly by four parts: object: Object this parameter refers to, e.g. graceA , G023 , earth , type: Type of this parameter, e.g. accBias , position.x , temporal: Temporal representation of this parameter, e.g. trend , polynomial.degree1 , interval: Interval/epoch this parameter represents, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00 , 2018-01-01_00-00-00 . Read parameter names from file . Parameter names of gravity . An additional object name can be included in the parameter names. Parameter names of satellite acceleration . Arc related parameters are appended if an is provided which defines the arc structure. An additional object name can be included in the parameter names. Parameter names of satellite tracking . An additional object name can be included in the parameter names. Parameter names from temporal parametrization. It is possible to setup the temporal parameters for each . Parameter names of GNSS antenna center variation . An additional object name (antenna name) can be included in the parameter names. It is possible to setup the parameters for each . Parameter names used in . Replaces parts of s. The star " * " left this part untouched. Select a subset of s using . Removes all duplicate names (keep first) from .', 'config_table': 'object string object this parameter refers to, e.g. graceA, G023, earth type string type of this parameter, e.g. accBias, position.x temporal string temporal representation of this parameter, e.g. trend, polynomial.degree1 interval string interval/epoch this parameter refers to, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00, 2008-01-01_00-00-00 inputfileParameterNames filename file with parameter names object string object these parameters refers to, e.g. 
earth parametrization parametrizationGravityType object string object these parameters refers to, e.g. graceA, G023 parameterization parametrizationAccelerationType inputfileInstrument filename defines the arc structure for arc related parameters object string object these parameters refers to, e.g. grace1.grace2 parameterization parametrizationSatelliteTrackingType parameterNameBase parameterNamesType parametrizationTemporal parametrizationTemporalType object string antenna name parametrization parametrizationGnssAntennaType gnssType gnssType e.g. C1CG** observation observationType parameterName parameterNamesType object string *: left this part untouched, object type string *: left this part untouched, type temporal string *: left this part untouched, temporal representation interval string *: left this part untouched, interval/epoch parameterName parameterNamesType parameterSelection parameterSelectorType parameter order/selection parameterName parameterNamesType', 'display_text': 'Generates a list of parameter names. All parameters are appended.
Name
The parameter is given explicitly by four parts:
object: Object this parameter refers to, e.g. graceA, G023, earth,
type: Type of this parameter, e.g. accBias, position.x,
temporal: Temporal representation of this parameter, e.g. trend, polynomial.degree1,
interval: Interval/epoch this parameter represents, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00, 2018-01-01_00-00-00.
Parameter names of gravity parametrization. An additional object name can be included in the parameter names.
Acceleration
Parameter names of satellite acceleration parametrization. Arc related parameters are appended if an inputfileInstrument is provided which defines the arc structure. An additional object name can be included in the parameter names.
SatelliteTracking
Parameter names of satellite tracking parametrization. An additional object name can be included in the parameter names.
Temporal
Parameter names from temporal parametrization. It is possible to setup the temporal parameters for each parameterNameBase.
GnssAntenna
Parameter names of GNSS antenna center variation parametrization. An additional object name (antenna name) can be included in the parameter names. It is possible to setup the parameters for each gnssType.
Removes all duplicate names (keep first) from parameterName.'},
'parameterSelectorType': { 'name': 'parameterSelectorType', 'key': 'parameterSelectorType', 'description': 'This class provides an index vector from selected parameters, which can be used e.g. to reorder a normal equation matrix. The size of the index vector determines the size of the new matrix. Entries are the indices of the selected parameters in the provided parameter list or NULLINDEX for zero/new parameters. Parameter index vector from name. Name matching supports wildcards * for any number of characters and ? for exactly one character. Does not add zero/empty parameters if there are no matches. Parameter index vector from list of parameter names. Parameter index vector from range. Parameter index vector from matrix. Expand parameter index vector by adding zero parameters. Groups a set of s and has no further effect itself. Parameter index vector from a complement of other parameter selector(s).', 'config_table': 'object string object this parameter refers to, e.g. graceA, G023, earth (wildcards: * and ?) type string type of this parameter, e.g. accBias, position.x (wildcards: * and ?) temporal string temporal representation of this parameter, e.g. trend, polynomial.degree1 (wildcards: * and ?) interval string interval/epoch this parameter refers to, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00, 2008-01-01_00-00-00 (wildcards: * and ?) 
parameterName parameterNamesType start expression start at this index (variables: length) count expression count of parameters, default: all parameters to the end (variables: length) inputfileMatrix filename index in old parameter list or -1 for new parameter column expression use this column (counting from 0, variables: columns) startRow expression start at this row (counting from 0, variables: rows) countRows expression use these many rows (default: use all, variables: rows) count expression count of zero parameters (variables: length) parameterSelection parameterSelectorType parameter order/selection parameterSelection parameterSelectorType parameter order/selection', 'display_text': 'This class provides an index vector from selected parameters, which can be used e.g. to reorder a normal equation matrix. The size of the index vector determines the size of the new matrix. Entries are the indices of the selected parameters in the provided parameter list or NULLINDEX for zero/new parameters.
Wildcard
Parameter index vector from name. Name matching supports wildcards * for any number of characters and ? for exactly one character. Does not add zero/empty parameters if there are no matches.
Names
Parameter index vector from list of parameter names.
Range
Parameter index vector from range.
Matrix
Parameter index vector from matrix.
Zeros
Expand parameter index vector by adding zero parameters.
Parameter index vector from a complement of other parameter selector(s).'},
'parametrizationAccelerationType': { 'name': 'parametrizationAccelerationType', 'key': 'parametrizationAccelerationType', 'description': 'This class defines parameters of satellite accelerations. It will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended. Oscillation once, twice, ... per revolution in Satellite Reference Frame (SRF) with the argument of latitude as input angle. If the attitude of the satellite is not provided the Celestial Reference Frame (CRF) is used instead. Paramters are estimated in . The parameter names are *:perRevolution.cos(<order>*u).x::<interval> , *:perRevolution.cos(<order>*u).y::<interval> , *:perRevolution.cos(<order>*u).z::<interval> , *:perRevolution.sin(<order>*u).x::<interval> , *:perRevolution.sin(<order>*u).y::<interval> , *:perRevolution.sin(<order>*u).z::<interval> . Temporal changing accelerometer bias per axis in in Satellite Reference Frame (SRF). If the attitude of the satellite is not provided the Celestial Reference Frame (CRF) is used instead. The parameter names are *:accBias.x:*:* , *:accBias.y:*:* , *:accBias.z:*:* . Accelerometer scale factor per axis. The parameter names are *:accScale.x:<temporal>:<interval> , *:accScale.y:<temporal>:<interval> , *:accScale.z:<temporal>:<interval> , *:accScaleCross.xy:<temporal>:<interval> , *:accScaleCross.xz:<temporal>:<interval> , *:accScaleCross.yz:<temporal>:<interval> , *:accScaleRotation.xy:<temporal>:<interval> , *:accScaleRotation.xz:<temporal>:<interval> , *:accScaleRotation.yz:<temporal>:<interval> . This parametrization needs the attitude of the satellite. GNSS solar radiation pressure model. Paramters are estimated in . 
The parameter names are *:solarRadiationPressure.ECOM.D0:*:* , *:solarRadiationPressure.ECOM.DC2:*:* , *:solarRadiationPressure.ECOM.DS2:*:* , *:solarRadiationPressure.ECOM.DC4:*:* , *:solarRadiationPressure.ECOM.DS4:*:* , *:solarRadiationPressure.ECOM.Y0:*:* , *:solarRadiationPressure.ECOM.B0:*:* , *:solarRadiationPressure.ECOM.BC1:*:* , *:solarRadiationPressure.ECOM.BS1:*:* , *:solarRadiationPressure.ECOM.BC3:*:* , *:solarRadiationPressure.ECOM.BS3:*:* . This parametrization needs the attitude of the satellite. Estimate the thermospheric density along the orbit using a satllite macro model. An optional thermospheric model can be used to compute temperature and wind. The temperature is used to estimate variable drag and lift coefficients, otherwise a constant drag coefficient is used. The density is estimated in . The parameter names are *:density:<temporal>:<interval> . This parametrization needs the macro model and the attitude of the satellite. Estimate a scale factor for a given model. The parameter names are *:modelScale:<temporal>:<interval> .', 'config_table': 'order uint once, twice, ... 
estimateX boolean along estimateY boolean cross estimateZ boolean radial interval timeSeriesType setup new parameters each interval perArc boolean estimateX boolean along estimateY boolean cross estimateZ boolean radial temporal parametrizationTemporalType perArc boolean inputfileAccelerometer filename estimateX boolean along estimateY boolean cross estimateZ boolean radial estimateCrossTalk boolean non-orthognality of axes estimateRotation boolean misalignment temporal parametrizationTemporalType perArc boolean estimateD0 boolean constant term along D-axis (sat-sun vector) estimateD2 boolean 2-per-rev terms along D-axis estimateD4 boolean 4-per-rev terms along D-axis estimateY0 boolean constant term along Y-axis (solar panel axis) estimateB0 boolean constant term along B-axis (cross product D x Y) estimateB1 boolean 1-per-rev terms along B-axis estimateB3 boolean 3-per-rev terms along B-axis perArc boolean eclipse eclipseType thermosphere thermosphereType for wind and temperature earthRotation double [rad/s] considerTemperature boolean compute drag and lift, otherwise simple drag coefficient is used considerWind boolean temporalDensity parametrizationTemporalType parameters along orbit perArc boolean miscAccelerations miscAccelerationsType temporal parametrizationTemporalType perArc boolean', 'display_text': 'This class defines parameters of satellite accelerations. It will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended.
PerRevolution
Oscillation once, twice, ... per revolution in Satellite Reference Frame (SRF) with the argument of latitude as input angle. If the attitude of the satellite is not provided the Celestial Reference Frame (CRF) is used instead. Parameters are estimated in $[nm/s^2=10^{-9}\\,m/s^2]$.
Temporal changing accelerometer bias per axis in $[m/s^2]$ in Satellite Reference Frame (SRF). If the attitude of the satellite is not provided the Celestial Reference Frame (CRF) is used instead.
This parametrization needs the attitude of the satellite.
ThermosphericDensity
Estimate the thermospheric density along the orbit using a satellite macro model. An optional thermospheric model can be used to compute temperature and wind. The temperature is used to estimate variable drag and lift coefficients, otherwise a constant drag coefficient is used. The density is estimated in $[kg/m^3]$.
This parametrization needs the macro model and the attitude of the satellite.
ModelScale
Estimate a scale factor for a given model. The parameter names are *:modelScale:<temporal>:<interval>.'},
'parametrizationGnssAntennaType': { 'name': 'parametrizationGnssAntennaType', 'key': 'parametrizationGnssAntennaType', 'description': 'Parametrization of antenna center variations. It will be used to set up the design matrix in a least squares adjustment. Usually the parametrization is setup separately for different . If multiple parametrizations are given the parameters are sequently appended in the design matrix and parameter vector. Antenna center or, if setup for a specific , phase/code center offset (e.g. *1*G for GPS L1 phase center offset) in . The parameter names are *:antennaCenter.x:*:* , *:antennaCenter.y:*:* , *:antennaCenter.z:*:* . Parametrization of antenna center variations in in terms of spherical harmonics. As usually only data above the horizon are observed only the even spherical harmonics (degree/order even), which are symmetric to the equator, are setup. The total count of parameters is and the parameter names are *:antennaCenterVariations.sphericalHarmonics.c_<degree>_<order>:*:* , *:antennaCenterVariations.sphericalHarmonics.s_<degree>_<order>:*:* . Parametrization of antenna center variations with radial basis functions where in the coefficients which has to be estimated and are the basis functions The parameter names are *:antennaCenterVariations.radialBasis.<index>.<total count>:*:* .', 'config_table': 'estimateX boolean estimateY boolean estimateZ boolean minDegree uint min degree maxDegree uint max degree grid gridType nodal points of shannon kernels minDegree uint min degree of shannon kernel maxDegree uint max degree of shannon kernel', 'display_text': 'Parametrization of antenna center variations. It will be used to set up the design matrix in a least squares adjustment. Usually the parametrization is setup separately for different gnssType.
If multiple parametrizations are given the parameters are sequentially appended in the design matrix and parameter vector.
Center
Antenna center or, if setup for a specific gnssType, phase/code center offset (e.g. *1*G for GPS L1 phase center offset) in $[m]$.
Parametrization of antenna center variations in $[m]$ in terms of spherical harmonics. As usually only data above the horizon are observed only the even spherical harmonics (degree/order $m+n$ even), which are symmetric to the equator, are setup.
The total count of parameters is $((n_{max}+1)(n_{max}+2)-n_{min}(n_{min}+1))/2$ and the parameter names are
Parametrization of antenna center variations with radial basis functions \\[ ACV(\\M x(A, E)) = \\sum_i a_i \\Phi(\\M x\\cdot\\M x_i) \\]where $a_i$ in $[m]$ are the coefficients which have to be estimated and $\\Phi$ are the basis functions \\[ \\Phi(\\cos\\psi) = \\sum_n \\sqrt{2n+1}P_n(\\cos\\psi). \\] The parameter names are *:antennaCenterVariations.radialBasis.<index>.<total count>:*:*.
'},
'parametrizationGravityType': { 'name': 'parametrizationGravityType', 'key': 'parametrizationGravityType', 'description': 'This class gives a parametrization of the time depending gravity field. Together with the class it will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended. The potential is parametrized by a expansion of (fully normalized) spherical harmonics You can set the range of degree with minDegree and maxDegree . The sorting sequence of the potential coefficients in the parameter vector can be defined by . The total count of parameters is and the parameter names are *:sphericalHarmonics.c_<degree>_<order>:*:* , *:sphericalHarmonics.s_<degree>_<order>:*:* . The potential is represented by a sum of space localizing basis functions where the coefficients which has to be estimated and are the basis functions given by isotropic radial functions The basis functions are located on a grid given by . This class can also be used to estimate point masses if is set to density. The parameter names are *:radialBasis.<index>.<total count>:*:* . The time variable potential is given by wehre is the spatial parametrization of the gravity field and can be choosen with . The parametrization in time domain is selected by . The total parameter count is the parameter count of times the parameter count of . Parametrization of the gravity field on the basis of a linear transformation of a source parametrization. The linear transformation changes the original solution space represented by from to through the linear transformation . It follows that the rows of the matrix in inputfileTransformationMatrix coincides with the number of parameters in . The new parameter count is given by the number of columns in and may be smaller, equal or larger than the original parameter count. The parameter names are *:transformedParameter.<index>.<total count>:*:* . 
This class is used to estimate the earthquake oscillation function parameters, i.e. , , and . The results describes the variation in the gravitational potential field caused by large earthquakes. with and . In this equation, is the attenuation factor, is the overtone factor, is degree, is order, and is time after earthquake in second. The parameter names are *:earthquakeParameter.c_<degree>_<order>_A:*:* , *:earthquakeParameter.s_<degree>_<order>_A:*:* , *:earthquakeParameter.c_<degree>_<order>_W:*:* , *:earthquakeParameter.s_<degree>_<order>_W:*:* , *:earthquakeParameter.c_<degree>_<order>_P:*:* , *:earthquakeParameter.s_<degree>_<order>_P:*:* .', 'config_table': 'minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme kernel kernelType shape of the radial basis function grid gridType nodal point distribution parametrizationGravity parametrizationGravityType parametrizationTemporal parametrizationTemporalType parametrizationGravitySource parametrizationGravityType inputfileTransformationMatrix filename transformation matrix from target to source parametrization (rows of this matrix must coincide with the parameter count of the source parametrization) inputInitialCoefficient filename initial values for oscillation parameters time0 time the time earthquake happened minDegree uint maxDegree uint GM double Geocentric gravitational constant R double reference radius numbering sphericalHarmonicsNumberingType numbering scheme', 'display_text': 'This class gives a parametrization of the time depending gravity field. Together with the class oberservation it will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended.
SphericalHarmonics
The potential $V$ is parametrized by an expansion of (fully normalized) spherical harmonics \\[ V(\\lambda,\\vartheta,r) = \\frac{GM}{R}\\sum_{n=0}^\\infty \\sum_{m=0}^n \\left(\\frac{R}{r}\\right)^{n+1} \\left(c_{nm} C_{nm}(\\lambda,\\vartheta) + s_{nm} S_{nm}(\\lambda,\\vartheta)\\right). \\]You can set the range of degree $n$ with minDegree and maxDegree. The sorting sequence of the potential coefficients in the parameter vector can be defined by numbering.
The total count of parameters is $(n_{max}+1)^2-n_{min}^2$ and the parameter names are
*:sphericalHarmonics.c_<degree>_<order>:*:*,
*:sphericalHarmonics.s_<degree>_<order>:*:*.
RadialBasis
The potential $V$ is represented by a sum of space localizing basis functions \\[ V(\\M x) = \\sum_i a_i \\Phi(\\M x, \\M x_i) \\]where $a_i$ are the coefficients which have to be estimated and $\\Phi$ are the basis functions given by isotropic radial kernel functions \\[ \\Phi(\\cos\\psi,r,R) = \\sum_n \\left(\\frac{R}{r}\\right)^{n+1} k_n\\sqrt{2n+1}\\bar{P}_n(\\cos\\psi). \\]The basis functions are located on a grid $\\M x_i$ given by grid. This class can also be used to estimate point masses if kernel is set to density.
The parameter names are *:radialBasis.<index>.<total count>:*:*.
Temporal
The time variable potential is given by \\[ V(\\M x,t) = \\sum_i V_i(\\M x)\\Psi_i(t), \\]where $V_i(\\M x)$ is the spatial parametrization of the gravity field and can be chosen with parametrizationGravity. The parametrization in time domain $\\Psi_i(t)$ is selected by parametrizationTemporal. The total parameter count is the parameter count of parametrizationTemporal times the parameter count of parametrizationGravity.
LinearTransformation
Parametrization of the gravity field on the basis of a linear transformation of a source parametrization. The linear transformation changes the original solution space represented by parametrizationGravitySource from \\[ \\mathbf{l} = \\mathbf{A}\\mathbf{x} + \\mathbf{e} \\]to \\[ \\mathbf{l} = \\mathbf{A}\\mathbf{F}\\mathbf{y} + \\mathbf{e} \\]through the linear transformation $\\mathbf{x}=\\mathbf{F}\\mathbf{y}$. It follows that the rows of the matrix $\\mathbf{F}$ in inputfileTransformationMatrix coincide with the number of parameters in parametrizationGravitySource. The new parameter count is given by the number of columns in $\\mathbf{F}$ and may be smaller, equal or larger than the original parameter count.
The parameter names are *:transformedParameter.<index>.<total count>:*:*.
EarthquakeOscillation
This class is used to estimate the earthquake oscillation function parameters, i.e. $C_{nlm}$, $\\omega_{nlm}$, and $P_{nlm}$. The results describe the variation in the gravitational potential field caused by large earthquakes. \\[ C_{lm}(\\M t) = \\sum_{n=0}^NC_{nlm}(1-\\cos(\\omega_{nlm}d\\M t)\\exp(P_{nlm}\\omega_{nlm}d\\M t)), \\]with $\\omega_{nlm}=\\frac{2\\pi}{T_{nlm}}$ and $P_{nlm}=\\frac{-1}{2Q_{nlm}}$. In this equation, $Q_{nlm}$ is the attenuation factor, $n$ is the overtone factor, $m$ is degree, $l$ is order, and $t$ is time after earthquake in seconds.
'},
'parametrizationSatelliteTrackingType': { 'name': 'parametrizationSatelliteTrackingType', 'key': 'parametrizationSatelliteTrackingType', 'description': 'This class defines parameters of Satellite-to-Satellite tracking observations. It will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended. Estimate the KBR antenna phase centre (APC) coordinates in for each spacecraft in satellite reference frame (SRF) as constant per axis. The observation equations are computed by taking the derivative of the antenna offset correction equation w.r.t. the KBR APC coordinates. The parameter names are satellite1.satellite2:sstAntennaCenter1.x:*:* , satellite1.satellite2:sstAntennaCenter1.y:*:* , satellite1.satellite2:sstAntennaCenter1.z:*:* , satellite1.satellite2:sstAntennaCenter2.x:*:* , satellite1.satellite2:sstAntennaCenter2.y:*:* , satellite1.satellite2:sstAntennaCenter2.z:*:* . Estimate bias for SST observations in or ]. The temporal variation is defined by . The parameter names are satellite1.satellite2:sstBias:<temporal>:<interval> . Estimate scale factor for SST observations with respect to reference SST data . The temporal variation is defined by . The parameter names are satellite1.satellite2:sstScale:<temporal>:<interval> . Estimate time shift in seconds in SST observations, with defined temporal variation by . The design matrix is computed by taking the derivative of the ranging data w.r.t. time. The parameter names are satellite1.satellite2:sstTimeBias:<temporal>:<interval> . Estimate scale factors for deterministic signal models from satellite tracking instrument file , see EnsembleAveragingScaleModel . Amplitude variation of model waveforms is defined by . The parameter names are satellite1.satellite2:scaleModel:<temporal>:<interval> . 
Estimate deterministic signals in the GRACE K-Band measurements caused by Sun intrusions into the star camera baffles of GRACE-A and eclipse transits of the satellites. These events can be time-indexed beforehand using satellite position and orientation, see GraceSstSpecialEvents . The shape of this short-period waveform is nearly constant within one month and can be approximated by a polynomial. The amplitude variation of the waveform can also be taken into account by . The parameter names are satellite1.satellite2:<type>.legendrePolynomial.n<degree>:<temporal>:<interval> .', 'config_table': 'estimate1X boolean along (satellite 1) estimate1Y boolean cross (satellite 1) estimate1Z boolean nadir (satellite 1) estimate2X boolean along (satellite 2) estimate2Y boolean cross (satellite 2) estimate2Z boolean nadir (satellite 2) interpolationDegree uint differentiation by polynomial approximation of degree n temporal parametrizationTemporalType perArc boolean inputfileSatelliteTracking filename temporal parametrizationTemporalType perArc boolean polynomialDegree uint polynomial degree temporal parametrizationTemporalType perArc boolean inputfileSatelliteTracking filename temporal parametrizationTemporalType perArc boolean inputfileEvents filename instrument with GRACE events type choice eclipse1 eclipse2 starCameraBox1 starCameraBox2 starCameraBox3 starCameraBox4 starCameraBox5 starCameraBox6 marginLeft double margin size (on both sides) [seconds] marginRight double margin size (on both sides) [seconds] minNumberOfEvents uint min. number of events to setup parameters polynomialDegree uint polynomial degree temporal parametrizationTemporalType', 'display_text': 'This class defines parameters of Satellite-to-Satellite tracking observations. It will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended.
AntennaCenter
Estimate the KBR antenna phase centre (APC) coordinates in $[m]$ for each spacecraft in satellite reference frame (SRF) as constant per axis. The observation equations are computed by taking the derivative of the antenna offset correction equation w.r.t. the KBR APC coordinates.
The parameter names are satellite1.satellite2:sstScale:<temporal>:<interval>.
TimeBias
Estimate time shift in seconds in SST observations, with defined temporal variation by parametrizationTemporal. The design matrix is computed by taking the derivative of the ranging data w.r.t. time.
The parameter names are satellite1.satellite2:sstTimeBias:<temporal>:<interval>.
The parameter names are satellite1.satellite2:scaleModel:<temporal>:<interval>.
SpecialEffect
Estimate deterministic signals in the GRACE K-Band measurements caused by Sun intrusions into the star camera baffles of GRACE-A and eclipse transits of the satellites. These events can be time-indexed beforehand using satellite position and orientation, see GraceSstSpecialEvents. The shape of this short-period waveform is nearly constant within one month and can be approximated by a polynomial. The amplitude variation of the waveform can also be taken into account by parametrizationTemporal.
The parameter names are satellite1.satellite2:<type>.legendrePolynomial.n<degree>:<temporal>:<interval>.'},
'parametrizationTemporalType': { 'name': 'parametrizationTemporalType', 'key': 'parametrizationTemporalType', 'description': 'This class gives a parametrization of time depending parameters (gravity field, positions, ...). It will be used to set up the design matrix in a least squares adjustment. If multiple parametrizations are given the coefficients in the parameter vector are sequently appended. Useally time intervals are defined half open meaning the last time belongs not to the interval. This behaviour can be changed for the last interval with includeLastTime . Represents a parameter being constant in time in each interval . The parameter names are *:*:*:<interval> . A time variable function is given by a linear trend with is timeStart and is timeStep in days. A constant term is not included and must added separately. The parameter name is *:*:trend.<timeStep(days)>*(t-<timeStart>):* . A time variable function is given by with the (spatial) coefficients as parameters and the temporal basis functions . Basis splines are defined as polynomials of degree in intervals between nodal points in time , for details see basis splines . The parameters are ordered timewise. First all parameters of then and so on. The total parameter count in each interval is , where is the count of time points from in each interval and is the degree . The parameter names are *:*:spline.n<degree>:<interval of each spline> . A time variable function is represented by Legendre polynomials in each interval . The time is normed to in each interval. The total parameter count is , where is the polynmial degree and the number of intervals with the parameter names *:*:legendrePolynomial.n<degree>:<interval> . A time variable function is given by a oscillation with , is timeStart and is timePeriod in days. The parameter names are *:*:oscillation.cos(2*pi/<period(days)>*(t-<timeStart>)):* and *:*:oscillation.sin(2*pi/<period(days)>*(t-<timeStart>)):* . 
A time variable function is given by a fourier expansion with the normalized time and is timeStart , is timeEnd in each interval and is the fourierDegree . The total parameter count is , where is the number of intervals. The parameters are sorted in following order: with the parameter names *:*:fourier.cos(<m>*x):<interval> and *:*:fourier.sin(<m>*x):<interval> . The time variable function is given by a fourier expansion where are the arguments of the tide constituents where are the Doodson\'s fundamental arguments ( ) and are the Doodson multipliers for the term at frequency . The multipliers must be given by coded as Doodson number (e.g. 255.555) or as names intoduced by Darwin (e.g. M2). The major constituents given by can be used to interpolate minor tidal constituents using the file . This file can be created with DoodsonHarmonicsCalculateAdmittance . The total parameter count is with the number of doodson frequencies. The parameters are sorted in following order: with the parameter names *:*:doodson.cos(<doodsonName>):* and *:*:doodson.sin(<doodsonName>):* .', 'config_table': 'interval timeSeriesType includeLastTime boolean timeStart time reference time timeStep time degree uint degree of splines timeSeries timeSeriesType nodal points in time domain intervals timeSeriesType includeLastTime boolean polynomialDegree uint polynomial degree interval timeSeriesType intervals of polynomials includeLastTime boolean period time [day] time0 time reference time fourierDegree uint interval timeSeriesType intervals of fourier series includeLastTime boolean doodson doodson code number (e.g. 255.555) or darwin name (e.g. M2) inputfileAdmittance filename interpolation of minor constituents', 'display_text': 'This class gives a parametrization of time depending parameters (gravity field, positions, ...). It will be used to set up the design matrix in a least squares adjustment. 
If multiple parametrizations are given the coefficients in the parameter vector are sequentially appended.
Usually time intervals are defined half open meaning the last time does not belong to the interval. This behaviour can be changed for the last interval with includeLastTime.
Constant
Represents a parameter being constant in time in each interval.
A time variable function is given by a linear trend \\[ f(x,t) = \\frac{1}{T}(t-t_0) \\cdot f_t(x), \\]with $t_0$ is timeStart and $T$ is timeStep in days. A constant term is not included and must be added separately.
The parameter name is *:*:trend.<timeStep(days)>*(t-<timeStart>):*.
Splines
A time variable function is given by \\[ f(x,t) = \\sum_i f_i(x)\\Psi_i(t), \\]with the (spatial) coefficients $f_i(x)$ as parameters and the temporal basis functions $\\Psi_i(t)$. Basis splines are defined as polynomials of degree $n$ in intervals between nodal points in time $t_i$, for details see basis splines.
The parameters are ordered timewise. First all parameters of $f_{i=1}(x)$ then $f_{i=2}(x)$ and so on. The total parameter count in each interval is $N=N_t+n-1$, where $N_t$ is the count of time points from timeSeries in each interval and $n$ is the degree.
The parameter names are *:*:spline.n<degree>:<interval of each spline>.
Polynomial
A time variable function is represented by Legendre polynomials in each interval. The time is normed to $[-1,1)$ in each interval.
The total parameter count is $(N+1)M$, where $N$ is the polynomial degree and $M$ the number of intervals with the parameter names *:*:legendrePolynomial.n<degree>:<interval>.
Oscillation
A time variable function is given by an oscillation \\[ f(x,t) = f^c(\\M x)\\cos(\\omega_i(t)) + f^s(\\M x)\\sin(\\omega_i(t)) \\]with $\\omega_i=\\frac{2\\pi}{T_i}(t-t_0)$, $t_0$ is timeStart and $T_i$ is timePeriod in days.
The parameter names are *:*:oscillation.cos(2*pi/<period(days)>*(t-<timeStart>)):* and *:*:oscillation.sin(2*pi/<period(days)>*(t-<timeStart>)):*.
Fourier
A time variable function is given by a fourier expansion \\[ f(x,t) = \\sum_{m=1}^M f_m^c(\\M x)\\cos(2\\pi m \\tau) + f_m^s(\\M x)\\sin(2\\pi m \\tau) \\]with the normalized time \\[ \\tau = \\frac{t-t_A}{t_B-t_A}, \\]and $t_A$ is timeStart, $t_B$ is timeEnd in each interval and $M$ is the fourierDegree.
The total parameter count is $2MN$, where $N$ is the number of intervals. The parameters are sorted in the following order: $f_1^c, f_1^s, f_2^c, \\ldots$ with the parameter names *:*:fourier.cos(<m>*x):<interval> and *:*:fourier.sin(<m>*x):<interval>.
DoodsonHarmonic
The time variable function is given by a fourier expansion \\[ f(x,t) = \\sum_{i} f_i^c(x)\\cos(\\Theta_i(t)) + f_i^s(x)\\sin(\\Theta_i(t)), \\]where $\\Theta_i(t)$ are the arguments of the tide constituents $i$ \\[ \\Theta_i(t) = \\sum_{k=1}^6 n_i^k\\beta_k(t), \\]where $\\beta_k(t)$ are the Doodson\'s fundamental arguments ($\\tau,s,h,p,N\',p_s$) and $n_i^k$ are the Doodson multipliers for the term at frequency $i$. The multipliers must be given by doodson coded as Doodson number (e.g. 255.555) or as names introduced by Darwin (e.g. M2).
The total parameter count is $2N$ with $N$ the number of doodson frequencies. The parameters are sorted in the following order: $f_1^c, f_1^s, f_2^c, \\ldots$ with the parameter names *:*:doodson.cos(<doodsonName>):* and *:*:doodson.sin(<doodsonName>):*.'},
'planetType': { 'name': 'planetType', 'key': 'planetType', 'description': 'Defines the planet to compute the .', 'config_table': 'planetType choice planet earth sun moon mercury venus mars jupiter saturn uranus neptune pluto solarBaryCenter earthMoonBaryCenter', 'display_text': 'Defines the planet to compute the ephemeris.'},
'platformSelectorType': { 'name': 'platformSelectorType', 'key': 'platformSelectorType', 'description': 'Select a list of platforms (stations, satellites, ...). In a first step all platforms are selected if first selector exclude s platforms otherwise all platforms excluded. When every selector from top to bottom selects or deselects (with exclude ) the matching platforms. See also GnssProcessing or SlrProcessing . Select all platforms. Select all receivers/transmitters which match the name , markerName , and markerNumber . Select receivers/transmitters from each row of . Additional columns in a row represent alternatives if previous names are not available (e.g. without observation file). Select all platforms which has the specified equipment in the processed time interval. Deselects all selected receivers/transmitters of .', 'config_table': 'name string wildcards: * and ? markerName string wildcards: * and ?, from platform markerNumber string wildcards: * and ?, from platform exclude boolean deselect matching platforms inputfileStringTable filename list of names with alternatives exclude boolean deselect first matching platforms name string wildcards: * and ? serial string wildcards: * and ? equipmentType choice equipment type all all types gnssAntenna sequence antennas radome string wildcards: * and ? gnssReceiver sequence receivers version string wildcards: * and ? slrStation SLR station slrRetroReflector laser retroreflector satelliteIdentifier sequence satellite identifier cospar string wildcards: * and ? norad string wildcards: * and ? sic string wildcards: * and ? sp3 string wildcards: * and ? other other types exclude boolean deselect matching platforms selector platformSelectorType', 'display_text': 'Select a list of platforms (stations, satellites, ...). In a first step all platforms are selected if first selector excludes platforms otherwise all platforms excluded. 
Then every selector from top to bottom selects or deselects (with exclude) the matching platforms.
Select all receivers/transmitters which match the name, markerName, and markerNumber.
File
Select receivers/transmitters from each row of inputfileStringTable. Additional columns in a row represent alternatives if previous names are not available (e.g. without observation file).
Equipment
Select all platforms which have the specified equipment in the processed time interval.
Exclude
Deselects all selected receivers/transmitters of selector.'},
'plotAxisType': { 'name': 'plotAxisType', 'key': 'plotAxisType', 'description': 'Defines the style of the axes of PlotGraph . General axis for arbitrary input data. The input data are interpreted as MJD (modified Julian date). The unit of the tick spacings should be appenend to the number and can be any of Y (year, plot with 4 digits) y (year, plot with 2 digits) O (month, plot using FORMAT_DATE_MAP ) o (month, plot with 2 digits) U (ISO week, plot using FORMAT_DATE_MAP ) u (ISO week, plot using 2 digits) r (Gregorian week, 7-day stride from start of week TIME_WEEK_START ) K (ISO weekday, plot name of day) D (date, plot using FORMAT_DATE_MAP ) d (day, plot day of month 0-31 or year 1-366, via FORMAT_DATE_MAP ) R (day, same as d, aligned with TIME_WEEK_START ) H (hour, plot using FORMAT_CLOCK_MAP ) h (hour, plot with 2 digits) M (minute, plot using FORMAT_CLOCK_MAP ) m (minute, plot with 2 digits) S (second, plot using FORMAT_CLOCK_MAP ) s (second, plot with 2 digits). A secondary time axis can be added to specify larger intervals (e.g dates of hourly data). Examples: Settings for Fig. : majorTickSpacing = 6H , secondary: majorTickSpacing = 1D . Settings for Fig. : majorTickSpacing = 2d , secondary: majorTickSpacing = 1O , options = FORMAT_DATE_MAP="o yyyy" . Settings for Fig. : majorTickSpacing = 1o , secondary: majorTickSpacing = 1Y , options = FORMAT_DATE_MAP="mm" . Axis with string labels. The coordinate system is based on the label indices (e.g. 0, 1, 2).', 'config_table': 'min double The minimum value of the axis. If no value is given, the minimum scale value is set automatically. max double The maximum value of the axis. If no value is given, the maximum scale value is set automatically. majorTickSpacing double The boundary annotation. minorTickSpacing double The spacing of the frame tick intervals. gridLineSpacing double The spacing of the grid line intervals gridLine plotLineType The style of the grid lines. 
unit string Naming unit to append to the axis values. label string The description of the axis. logarithmic boolean If set to \'yes\', a logarithmic scale is used for the axis. color plotColorType Setting the color of the axis bars and labels. changeDirection boolean If set to \'yes\', the directions right/up are changed to left/down. min time The minimum value of the time axis. If no value is given, the minimum scale value is set automatically. max time The maximum value of the time axis. If no value is given, the maximum scale value is set automatically. majorTickSpacing string Y: year, o: month minorTickSpacing string D: date, d: day gridLineSpacing string H: clock, h: hour, m: minute, s: second secondary sequence secondary time axis majorTickSpacing string Y: year, o: month minorTickSpacing string D: date, d: day gridLineSpacing string H: clock, h: hour, m: minute, s: second color plotColorType color of axis bars and labels gridLine plotLineType The style of the grid lines. changeDirection boolean right->left / up->down options string adjust date format labels string tick labels (ticks are placed at their index. e.g. 0, 1, ..., 5) min expression minimum value of the axis max expression maximum values of the axis majorTickSpacing expression The boundary annotation. minorTickSpacing expression The spacing of the frame tick intervals. gridLineSpacing expression The spacing of the grid line intervals gridLine plotLineType The style of the grid lines. color plotColorType set the color of the axis and labels orthogonalLabels boolean labels are oriented orthogonal to axis changeDirection boolean If set to \'yes\', the directions right/up are changed to left/down.', 'display_text': 'Defines the style of the axes of PlotGraph.
Standard
General axis for arbitrary input data.
Time
The input data are interpreted as MJD (modified Julian date). The unit of the tick spacings should be appended to the number and can be any of
Y (year, plot with 4 digits)
y (year, plot with 2 digits)
O (month, plot using FORMAT_DATE_MAP)
o (month, plot with 2 digits)
U (ISO week, plot using FORMAT_DATE_MAP)
u (ISO week, plot using 2 digits)
r (Gregorian week, 7-day stride from start of week TIME_WEEK_START)
K (ISO weekday, plot name of day)
D (date, plot using FORMAT_DATE_MAP)
d (day, plot day of month 0-31 or year 1-366, via FORMAT_DATE_MAP)
R (day, same as d, aligned with TIME_WEEK_START)
H (hour, plot using FORMAT_CLOCK_MAP)
h (hour, plot with 2 digits)
M (minute, plot using FORMAT_CLOCK_MAP)
m (minute, plot with 2 digits)
S (second, plot using FORMAT_CLOCK_MAP)
s (second, plot with 2 digits).
A secondary time axis can be added to specify larger intervals (e.g. dates of hourly data).
Examples: Settings for Fig. plotAxisType:plotAxisTime1: majorTickSpacing=6H, secondary: majorTickSpacing=1D.
Settings for Fig. plotAxisType:plotAxisTime2: majorTickSpacing=2d, secondary: majorTickSpacing=1O, options=FORMAT_DATE_MAP="o yyyy".
Settings for Fig. plotAxisType:plotAxisTime3: majorTickSpacing=1o, secondary: majorTickSpacing=1Y, options=FORMAT_DATE_MAP="mm".
Labeled
Axis with string labels. The coordinate system is based on the label indices (e.g. 0, 1, 2).'},
'plotColorType': { 'name': 'plotColorType', 'key': 'plotColorType', 'description': 'Selects a color. Used in PlotDegreeAmplitudes , PlotGraph , PlotMap , PlotMatrix , PlotSphericalHarmonicsTriangle .', 'config_table': 'plotColorType choice color black red blue green orange darkred yellow lightgreen gray rgb sequence red uint 0..255 green uint 0..255 blue uint 0..255 grayscale sequence value uint 0..255 namedColor sequence colorName string name after GMT definition cycler sequence index uint pick color based on index expression inputfileColorList filename list of colors as defined by GMT', 'display_text': 'Selects a color. Used in PlotDegreeAmplitudes, PlotGraph, PlotMap, PlotMatrix, PlotSphericalHarmonicsTriangle.
'},
'plotColorbarType': { 'name': 'plotColorbarType', 'key': 'plotColorbarType', 'description': 'A colorbar as used in PlotMap , PlotMatrix , PlotSphericalHarmonicsTriangle .', 'config_table': 'plotColorbarType sequence min double max double annotation double boundary annotation unit string appended to axis values label string description of the axis logarithmic boolean use logarithmic scale triangleLeft boolean triangleRight boolean illuminate boolean illuminate vertical boolean plot vertical color bar on the right length double length of colorbar in percent margin double between colorbar and figure [cm] colorTable string name of the color bar reverse boolean reverse direction showColorbar boolean', 'display_text': 'A colorbar as used in PlotMap, PlotMatrix, PlotSphericalHarmonicsTriangle.'},
'plotGraphLayerType': { 'name': 'plotGraphLayerType', 'key': 'plotGraphLayerType', 'description': 'Defines the content of an xy-plot of PlotGraph . Multiple layers are are plotted sequentially. With plotOnSecondAxis the alternative y-axis on the right hand side can be selected if provided. Draws a and/or points ( ) of xy data. The standard dataVariables are available to select the data columns of . If no of the is given a is required and the color is determined by valueZ . Additionally a vertical error bar can be plotted at each data point with size valueErrorBar . See Gravityfield2AreaMeanTimeSeries for an example plot. Draws a symmetrical envelope around valueY as function of valueX using deviations valueErrors . The standard dataVariables are available to select the data columns of . The data line itself is not plotted but must be added as extra . Creates a bar plot with vertical or horizontal bars out of the given x- and y-values. The standard dataVariables are available to select the data columns of . The bars ranges from valueBase (can be also an expression) to the valueY . If no is given a is required and the color is determined by valueZ . See Instrument2Histogram for an example plot. Creates a regular grid of yxz values. The standard dataVariables are available to select the data columns of . Empty grid cells are not plotted. Cells with more than one value will be set to the mean value. The grid spacing is determined by the median spacing of the input data or set by incrementX/Y . See Orbit2ArgumentOfLatitude for an example plot. Plots a rectangle to highlight an area. Writes a text at originX and originY position in the graph. With clip the text is cutted at the boundaries of the plotting area. Plot degree amplitudes of potential coefficients computed by Gravityfield2DegreeAmplitudes or PotentialCoefficients2DegreeAmplitudes . The standard dataVariables are available to select the data columns of . 
It plots a solid line for the valueSignal and a dotted line for the valueError per default. Plot degree amplitudes from a . The coefficients can be converted to different functionals with . If set the expansion is limited in the range between minDegree and maxDegree inclusivly. It plots a solid line for the degree amplitude (signal) and a dotted line for the formal errors per default. This is a simplified version of .', 'config_table': 'inputfileMatrix filename each line contains x,y valueX expression expression for x-values (input columns are named data0, data1, ...) valueY expression expression for y-values (input columns are named data0, data1, ...) valueZ expression expression for the colorbar valueErrorBar expression expression for error bars (input columns are named data0, data1, ...) description string text of the legend line plotLineType symbol plotSymbolType plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). inputfileMatrix filename each line contains x,y valueX expression expression for x-values (input columns are named data0, data1, ...) valueY expression expression for y-values (input columns are named data0, data1, ...) valueErrors expression expression for error values description string text of the legend fillColor plotColorType fill color of the envelope edgeLine plotLineType edge line style of the envelope plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). inputfileMatrix filename each line contains x,y valueX expression expression for x-values (input columns are named data0, data1, ...) valueY expression expression for y-values (input columns are named data0, data1, ...) 
valueZ expression expression for the colorbar valueBase expression base value of bars (default: minimum y-value) width expression width of bars (default: minimum x-gap) horizontal boolean draw horizontal bars instead of vertical description string text of the legend color plotColorType edgeLine plotLineType line plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). inputfileMatrix filename each line contains x,y,z valueX expression expression for x-values (input columns are named data0, data1, ...) valueY expression expression for y-values (input columns are named data0, data1, ...) valueZ expression expression for the colorbar incrementX double the grid spacing incrementY double the grid spacing plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). minX double empty: left maxX double empty: right minY double empty: bottom maxY double empty: top description string text of the legend edgeLine plotLineType fillColor plotColorType plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). text string originX double originY double offsetX double [cm] x-offset from origin offsetY double [cm] y-offset from origin alignment string L, C, R (left, center, right) and T, M, B (top, middle, bottom) fontSize double [pt] fontColor plotColorType clip boolean clip at boundaries plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). inputfileMatrix filename degree amplitudes valueDegree expression expression for x-values (degrees) (input columns are named data0, data1, ...) valueSignal expression expression for y-values (signal) (input columns are named data0, data1, ...) valueErrors expression expression for y-values (formal errors) description string text of the legend lineSignal plotLineType lineErrors plotLineType plotOnSecondAxis boolean draw dataset on a second Y-axis (if available). 
gravityfield gravityfieldType kernel kernelType type choice type of variances rms degree amplitudes (square root of degree variances) accumulation cumulate variances over degrees median median of absolute values per degree time time at this time the gravity field will be evaluated minDegree uint maxDegree uint description string text of the legend lineSignal plotLineType lineErrors plotLineType plotOnSecondAxis boolean draw dataset on a second Y-axis (if available).', 'display_text': 'Defines the content of an xy-plot of PlotGraph. Multiple layers are are plotted sequentially. With plotOnSecondAxis the alternative y-axis on the right hand side can be selected if provided.
LinesAndPoints
Draws a line and/or points (symbol) of xy data. The standard dataVariables are available to select the data columns of inputfileMatrix. If no color of the symbol is given a colorbar is required and the color is determined by valueZ. Additionally a vertical error bar can be plotted at each data point with size valueErrorBar.
Draws a symmetrical envelope around valueY as a function of valueX using deviations valueErrors. The standard dataVariables are available to select the data columns of inputfileMatrix. The data line itself is not plotted but must be added as an extra layer:linesAndPoints.
Bars
Creates a bar plot with vertical or horizontal bars out of the given x- and y-values. The standard dataVariables are available to select the data columns of inputfileMatrix. The bars range from valueBase (can also be an expression) to the valueY. If no color is given a colorbar is required and the color is determined by valueZ.
Creates a regular grid of yxz values. The standard dataVariables are available to select the data columns of inputfileMatrix. Empty grid cells are not plotted. Cells with more than one value will be set to the mean value. The grid spacing is determined by the median spacing of the input data or set by incrementX/Y.
Plot degree amplitudes from a gravityfield. The coefficients can be converted to different functionals with kernel. If set the expansion is limited in the range between minDegree and maxDegree inclusively. It plots a solid line for the degree amplitude (signal) and a dotted line for the formal errors per default.
This is a simplified version of layer:degreeAmplitudes.'},
'plotLegendType': { 'name': 'plotLegendType', 'key': 'plotLegendType', 'description': 'Plot a legend of the descriptions provided in in PlotGraph .', 'config_table': 'plotLegendType sequence width double legend width [cm] height double legend height [cm] (default: estimated) positionX double legend x-position in normalized (0-1) coordinates. positionY double legend y-position in normalized (0-1) coordinates. anchorPoint string Two character combination of L, C, R (for left, center, or right) and T, M, B for top, middle, or bottom. e.g., TL for top left columns uint number of columns in legend textColor plotColorType color of the legend text fillColor plotColorType fill color of the legend box edgeLine plotLineType style of the legend box edge', 'display_text': 'Plot a legend of the descriptions provided in plotGraphLayer in PlotGraph.'},
'plotLineType': { 'name': 'plotLineType', 'key': 'plotLineType', 'description': 'Defines the line style to be plotted. Draws a solid line. Draws a dashed line. Draws a dotted line. Draws a custom line. The line style code is described in https://docs.generic-mapping-tools.org/latest/cookbook/features.html#specifying-pen-attributes .', 'config_table': 'width double line width [p] color plotColorType width double line width [p] color plotColorType width double line width [p] color plotColorType style string line style code width double line width [p] color plotColorType', 'display_text': 'Defines the line style to be plotted.
Solid
Draws a solid line.
Dashed
Draws a dashed line.
Dotted
Draws a dotted line.
Custom
Draws a custom line. The line style code is described in https://docs.generic-mapping-tools.org/latest/cookbook/features.html#specifying-pen-attributes.'},
'plotMapLayerType': { 'name': 'plotMapLayerType', 'key': 'plotMapLayerType', 'description': 'Defines the content of a map of PlotMap . Multiple layers are are plotted sequentially. Creates a regular grid of xyz values. The standard dataVariables are available to select the data column of . Empty grid cells are not plotted. Cells with more than one value will be set to the mean value. The grid spacing can be determined automatically for regular rectangular grids otherwise it must be set with increment . To get a better display together with some projections the grid should be internally resample d to higher resolution. It is assumed that the points of represents centers of grid cells. This assumption can be changed with gridlineRegistered (e.g. if the data starts at the north pole). Draws points ( ) and/or s between the points. If no of the is given a is required and the color is determined by the value expression. The standard dataVariables are available to select the data column of . Draws an arrow for each point in . The arrows are defined by the expressions valueNorth/East . The standard dataVariables are available to select the correspondent data columns of . The scale factor converts the input units to cm in the plot. If no is given a is required and the color is determined by the value expression. With scaleArrow a reference arrow as legend can be plotted inside or outside the map. Draws a . If is not set and a value is given the fill color is taken from a . Plots coastlines. GMT provides them in different resolution s. Features with an area smaller than minArea in will not be plotted. Plots rivers and lakes. GMT provides different classes ( https://docs.generic-mapping-tools.org/latest/coast.html ). Plots national boundaries. GMT provides them in different resolution s. An image of the Earth\'s surface as seen from outer space - the image is known as blue marble . 
The directory of inputfileChannels contains several files in different resolutions representing the Earth\'s surface each month throughout a year. Writes a text at originLongitude and originLatitude position in the map. With clip the text is cutted at the boundaries of the plotting area.', 'config_table': 'inputfileGriddedData filename value expression expression to compute values (input columns are named data0, data1, ...) increment angle the grid spacing [degrees] illuminate boolean illuminate grid resample sequence intermediateDpi double oversample grid for a smoother visual effect interpolationMethod choice interpolation method for oversampling bspline B-Spline interpolation bicubic bicubic interpolation bilinear bilinear interpolation nearest nearest neighbour interpolation threshold double A threshold of 1.0 requires all (4 or 16) nodes involved in interpolation to be non-NaN. 0.5 will interpolate about half way from a non-NaN value; 0.1 will go about 90% of the way. gridlineRegistered boolean treat input as point values instead of cell means inputfileGriddedData filename value expression expression to compute color (input columns are named data0, data1, ...) symbol plotSymbolType line plotLineType style of connecting lines drawLineAsGreatCircle boolean draw connecting lines as great circles (otherwise, a straight line is drawn instead) inputfileGriddedData filename grid file with north and east values for arrows valueNorth expression expression to compute north values (input columns are named data0, data1, ...) valueEast expression expression to compute east values (input columns are named data0, data1, ...) value expression expression to compute arrow color (input columns are named data0, data1, ...) 
scale double [cm per input unit] length scale factor penSize double [pt] width of arrow shaft headSize double [pt] size of arrow head, 0: no head, negative: reverse head color plotColorType empty: from value scaleArrow sequence draw an arrow for scale reference originX double [0-1] 0: left, 1: right originY double [0-1] 0: bottom, 1: top length double in same unit as valueNorth and valueEast unit string displayed unit text (e.g. 1 cm) label string description of the arrows inputfilePolygon filename line plotLineType style of border lines fillColor plotColorType polygon fill color (no fill color: determine from value if given, else: no fill) value double value to compute fill color from a colorbar (ignored if a fillColor is given) drawLineAsGreatCircle boolean draw connecting lines as great circles (otherwise, a straight line is drawn instead) resolution choice crude low medium high full line plotLineType line style for coastlines landColor plotColorType fill land area oceanColor plotColorType fill ocean area minArea uint [km^2] features with a smaller area than this are dropped class choice riversCanalsLakes riversCanals permanentRiversLakes permanentRivers intermittentRivers canals singleClass sequence class uint 0-10. See GMT documentation line plotLineType resolution choice crude low medium high full line plotLineType inputfileImage filename Blue Marble image file brightness double brightness of bitmap [-1, 1] illuminate sequence add hillshade based on topography inputfileTopography filename GMT grid file containing topography. 
azimuth angle direction of lighting source [deg] elevation angle direction of lighting source [deg] ambient double ambient lighting diffuse double diffuse lighting specular double specular reflection shine double surface shine amplitude double scale gradient by factor text string originLongitude angle [deg] originLatitude angle [deg] offsetX double [cm] x-offset from origin offsetY double [cm] y-offset from origin alignment string L, C, R (left, center, right) and T, M, B (top, middle, bottom) fontSize double fontColor plotColorType clip boolean clip at boundaries', 'display_text': 'Defines the content of a map of PlotMap. Multiple layers are are plotted sequentially.
GriddedData
Creates a regular grid of xyz values. The standard dataVariables are available to select the data column of inputfileGriddedData. Empty grid cells are not plotted. Cells with more than one value will be set to the mean value. The grid spacing can be determined automatically for regular rectangular grids otherwise it must be set with increment. To get a better display together with some projections the grid should be internally resampled to higher resolution. It is assumed that the points of inputfileGriddedData represent centers of grid cells. This assumption can be changed with gridlineRegistered (e.g. if the data starts at the north pole).
Points
Draws points (symbol) and/or lines between the points. If no color of the symbol is given a colorbar is required and the color is determined by the value expression. The standard dataVariables are available to select the data column of inputfileGriddedData.
Arrows
Draws an arrow for each point in inputfileGriddedData. The arrows are defined by the expressions valueNorth/East. The standard dataVariables are available to select the corresponding data columns of inputfileGriddedData. The scale factor converts the input units to cm in the plot. If no color is given a colorbar is required and the color is determined by the value expression. With scaleArrow a reference arrow as legend can be plotted inside or outside the map.
Plots national boundaries. GMT provides them in different resolutions.
BlueMarble
An image of the Earth\'s surface as seen from outer space - the image is known as blue marble. The directory of inputfileChannels contains several files in different resolutions representing the Earth\'s surface each month throughout a year.
Text
Writes a text at originLongitude and originLatitude position in the map. With clip the text is cut at the boundaries of the plotting area.'},
'plotMapProjectionType': { 'name': 'plotMapProjectionType', 'key': 'plotMapProjectionType', 'description': 'Selects the underlying projection of PlotMap . The Robinson projection, presented by Arthur H. Robinson in 1963, is a modified cylindrical projection that is neither conformal nor equal-area. Central meridian and all parallels are straight lines; other meridians are curved. It uses lookup tables rather than analytic expressions to make the world map look right. The orthographic azimuthal projection is a perspective projection from infinite distance. It is therefore often used to give the appearance of a globe viewed from space. The orthographic azimuthal projection is a perspective projection from infinite distance. It is therefore often used to give the appearance of a globe viewed from space. Stereographic projection around given central point. Skyplot used to plot azimuth/elevation data as generated by GnssAntennaDefinition2Skyplot or GnssResiduals2Skyplot . A particular subset of the transverse Mercator is the Universal Transverse Mercator (UTM) which was adopted by the US Army for large-scale military maps. Here, the globe is divided into 60 zones between 84 S and 84 N, most of which are 6 wide. Each of these UTM zones have their unique central meridian. This conic projection was designed by Lambert (1772) and has been used extensively for mapping of regions with predominantly east-west orientation. Linear mapping of longitude/latitude to x/y (Plate Caree). This pseudo-cylindrical, equal-area projection was developed by Mollweide in 1805. Parallels are unequally spaced straight lines with the meridians being equally spaced elliptical arcs. The scale is only true along latitudes 40 44\' north and south. 
The projection is used mainly for global maps showing data distributions.', 'config_table': 'centralMeridian angle central meridian [degree] lambdaCenter angle central point [degree] phiCenter angle central point [degree] lambdaCenter angle longitude of central point in degrees phiCenter angle latitude of central point in degrees altitude double [km] azimuth angle to the east of north of view [degrees] tilt angle upward tilt of the plane of projection, if negative, then the view is centered on the horizon [degrees] viewpointTwist angle clockwise twist of the viewpoint [degrees] viewpointWidth angle width of the viewpoint [degrees] viewpointHeight angle height of the viewpoint [degrees] lambdaCenter angle longitude of central point in degrees phiCenter angle latitude of central point in degrees zone string UTM zone code (e.g. 33N) lambda0 angle longitude of projection center [deg] phi0 angle latitude of projection centert [deg] phi1 angle latitude of first standard parallel [deg] phi2 angle latitude of first standard parallel [deg] centralMeridian angle central meridian [degree]', 'display_text': 'Selects the underlying projection of PlotMap.
Robinson
The Robinson projection, presented by Arthur H. Robinson in 1963, is a modified cylindrical projection that is neither conformal nor equal-area. Central meridian and all parallels are straight lines; other meridians are curved. It uses lookup tables rather than analytic expressions to make the world map look right.
Orthographic
The orthographic azimuthal projection is a perspective projection from infinite distance. It is therefore often used to give the appearance of a globe viewed from space.
Perspective sphere
The orthographic azimuthal projection is a perspective projection from infinite distance. It is therefore often used to give the appearance of a globe viewed from space.
Polar
Stereographic projection around given central point.
A particular subset of the transverse Mercator is the Universal Transverse Mercator (UTM) which was adopted by the US Army for large-scale military maps. Here, the globe is divided into 60 zones between 84$^{o}$S and 84$^{o}$N, most of which are 6$^{o}$ wide. Each of these UTM zones have their unique central meridian.
Lambert
This conic projection was designed by Lambert (1772) and has been used extensively for mapping of regions with predominantly east-west orientation.
Linear
Linear mapping of longitude/latitude to x/y (Plate Caree).
Mollweide
This pseudo-cylindrical, equal-area projection was developed by Mollweide in 1805. Parallels are unequally spaced straight lines with the meridians being equally spaced elliptical arcs. The scale is only true along latitudes 40$^{o}$44\' north and south. The projection is used mainly for global maps showing data distributions.'},
'plotSymbolType': { 'name': 'plotSymbolType', 'key': 'plotSymbolType', 'description': 'Plots a symbol as used e.g. in or .', 'config_table': 'plotSymbolType choice symbol circle sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean star sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean cross sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean square sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean triangle sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean diamond sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean dash sequence color plotColorType empty: determined from value size double size of symbol [point] blackContour boolean', 'display_text': 'Plots a symbol as used e.g. in plotGraphLayer:linesAndPoints or plotMapLayer:points.'},
'podRightSideType': { 'name': 'podRightSideType', 'key': 'podRightSideType', 'description': 'Observation vector for precise orbit data (POD) of equations in a least squares adjustment. The observations are reduced by the effect of and (observed minus computed).', 'config_table': 'podRightSideType sequence inputfileOrbit filename kinematic positions of satellite as observations inputfileAccelerometer filename non-gravitational forces in satellite reference frame forces forcesType', 'display_text': 'Observation vector for precise orbit data (POD) of observation equations in a least squares adjustment. The observations are reduced by the effect of inputfileAccelerometer and forces (observed minus computed).'},
'sggRightSideType': { 'name': 'sggRightSideType', 'key': 'sggRightSideType', 'description': 'Observation vector for gradiometer data (satellite gravity gradiometry, SGG) of equations in a least squares adjustment. The observations are reduced by an , the effect of , and (observed minus computed). The reference gradiometer data can be precomputed with SimulateGradiometer .', 'config_table': 'sggRightSideType sequence inputfileGradiometer filename observed gravity gradients inputfileReferenceGradiometer filename precomputed gradients at orbit positions referencefield gravityfieldType tides tidesType', 'display_text': 'Observation vector for gradiometer data (satellite gravity gradiometry, SGG) of observation equations in a least squares adjustment. The observations are reduced by an inputfileReferenceGradiometer, the effect of referencefield, and tides (observed minus computed).
The reference gradiometer data can be precomputed with SimulateGradiometer.'},
'slrParametrizationType': { 'name': 'slrParametrizationType', 'key': 'slrParametrizationType', 'description': 'This class defines the models and parameters of the linearized observation equations for normal points (see SlrProcessing ) where the left side is the observation vector minus the effects computed from the a priori models. After each least squares adjustment (see ) the a priori parameters are updated The vector can be written with . Any outputfiles defined in the parametrizations are written with . Each parametrization (and possible constraint equations) has a name which enables activating/deactivating the estimation of subsets of with . The a priori model is unaffected and is always reduced. The model for the one way range observations between station and reflector can be described as See also SlrProcessing . A priori tropospheric correction is handled by a model (e.g. Mendes and Pavlis). Additional parameters in for zenith delay can be set up via . These parameters can be soft-constrained using to avoid an unsolvable system of normal equations in case of data gaps. The parameter names are <station>:troposphere:<temporal>:<interval> . The estimation of (reduced) dynamic orbits is formulated as variational equations. It is based on calculated with PreprocessingVariationalEquation . Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree . The must include at least those parameters that were estimated in PreprocessingVariationalEquationOrbitFit . Additional parameters can be set up to reduce orbit mismodeling. 
The parameters and parameter names are divided into global <satellite>:<parametrizationAcceleration>:*:* , <satellite>:stochasticPulse.x::<time> , <satellite>:stochasticPulse.y::<time> , <satellite>:stochasticPulse.z::<time> , and arc related parameters <satellite>:arc<no>.<parametrizationAcceleration>:*:* , <satellite>:arc<no>.position0.x:: , <satellite>:arc<no>.position0.y:: , <satellite>:arc<no>.position0.z:: . <satellite>:arc<no>.velocity0.x:: , <satellite>:arc<no>.velocity0.y:: , <satellite>:arc<no>.velocity0.z:: . Estimates a (time depending) gravity field together with at least one . The parametrization of the gravity field can be set with . The parameter names are gravityfield:<parametrization>:*:* . Estimates a static position for all in the terrestrial frame. No-net constraints can be applied for a subset of stations, , with a standard deviation of noNetTranslationSigma and noNetRotationSigma and noNetScaleSigma and noNetScaleSigma . If the template is provided the constraints are applied relatively to these positions. Only stations with an existing position file are considered. Without the constraints are applied towards the apriori values from . As a single corrupted station position can disturb the no-net conditions, the rotation/translation parameters are estimated in a robust least squares adjustment beforehand. The computed weight matrix is used to downweight corrupted stations in the constraint equations. In case you want to align to an ITRF/ILRS reference frame, precise coordinates can be generated with Sinex2StationPositions . The parameter names are <station>:position.x:: , <station>:position.y:: , <station>:position.z:: . Earth rotation parameters (ERPs) can be estimated by defining estimatePole ( , ) and estimateUT1 ( ). Estimating length of day (LOD) with the sign according to IGS conventions requires a negative value in . Constraints on the defined parameters can be added via . 
An example would be to set up so the parameter is included in the normal equation system . Since cannot be determined by SLR, a hard constraint to its a priori value can then be added. The parameter names are earth:polarMotion.xp:<temporal>:<interval> , earth:polarMotion.yp:<temporal>:<interval> , earth:UT1:<temporal>:<interval> , earth:nutation.X:<temporal>:<interval> , earth:nutation.>:<temporal>:<interval> . A priori station range bias value for all . The provides the mean range biases , but these have been determined using the passive satellites LAGEOS and Etalon and are therefore only suitable for passive satellites and not for active ones. Use SlrSinexDataHandling2Files to convert the range biases from to instrument file . Estimates a constant station range bias in for . The parameter names are <station>:rangeBias:: . A priori satellite range bias value for . Estimates a constant satellite range bias in for . The parameter names a <satellite>:rangeBias:: . A priori station-satellite range bias value between all - pairs. For standard processing this class should be setup twice. Once for the model from José Rodríguez (see SlrComModel2RangeBiasStationSatellite ) and additionally for biases from the converted with SlrSinexDataHandling2Files . Estimates the station-satellite range bias in between all - pairs. The parameter names are <station>.<satellite>:rangeBias:: . A priori time bias value for all . The provides the mean time biases , but these have been determined using the passive satellites LAGEOS and Etalon and are therefore only suitable for passive satellites and not for active ones. Use SlrSinexDataHandling2Files to convert the time biases from to instrument file . Estimates a time bias in for . The parameter names are <station>:timeBias:<temporal>:<interval> . Add a pseudo observation equation (constraint) for each selected where is the bias and is the a priori value of the parameter if relativeToApriori is not set. 
The standard deviation sigma is used to weight the observation equations. Groups a set of parameters. This class can be used to structure complex parametrizations and has no further effect itself.', 'config_table': 'name string used for parameter selection selectStations platformSelectorType outputfileTroposphere filename columns: MJD, ZHD, ZWD, dry north gradient, wet north gradient, dry east gradient, wet east gradient troposphere troposphereType a priori troposphere model troposphereEstimation parametrizationTemporalType [m] parametrization of zenith delays name string used for parameter selection selectSatellites platformSelectorType outputfileOrbit filename variable {satellite} available outputfileParameters filename variable {satellite} available inputfileVariational filename variable {satellite} available stochasticPulse timeSeriesType [mu/s] parametrization of stochastic pulses parametrizationAcceleration parametrizationAccelerationType orbit force parameters ephemerides ephemeridesType integrationDegree uint integration of forces by polynomial approximation of degree n interpolationDegree uint for orbit interpolation and velocity calculation name string used for parameter selection parametrization parametrizationGravityType name string used for parameter selection selectStations platformSelectorType outputfileGriddedPosition filename delta north east up for all stations outputfilePosition filename variable {station} available, full estimated coordinates (in TRF) nameConstraint string used for parameter selection selectNoNetStations platformSelectorType inputfileNoNetPositions filename variable {station} available, precise coordinates used for no-net constraints (in TRF) noNetTranslationSigma double (0 = unconstrained) sigma [m] for no-net translation constraint on station coordinates noNetRotationSigma double (0 = unconstrained) sigma [m] at Earth\'s surface for no-net rotation constraint on station coordinates noNetScaleSigma double (0 = unconstrained) 
sigma [m] for no-net scale constraint on station coordinates huber double stations > huber*sigma0 are downweighted in no-net constraint huberPower double stations > huber: sigma=(e/huber)^huberPower*sigma0 name string used for parameter selection outputfileEOP filename EOP time series (mjd, xp, yp, sp, dUT1, LOD, X, Y, S) estimatePole parametrizationTemporalType xp, yp [mas] estimateUT1 parametrizationTemporalType rotation angle [ms] estimateNutation parametrizationTemporalType dX, dY [mas] selectStations platformSelectorType inputfileRangeBias filename variable {station} available name string used for parameter selection selectStations platformSelectorType outputfileRangeBias filename variable {station} available selectSatellites platformSelectorType inputfileRangeBias filename variable {satellite} available name string used for parameter selection selectSatellites platformSelectorType outputfileRangeBias filename variable {satellite} available selectStations platformSelectorType selectSatellites platformSelectorType inputfileRangeBias filename variable {station} and {satellite} available name string used for parameter selection selectStations platformSelectorType selectSatellites platformSelectorType outputfileRangeBias filename variable {station} and {satellite} available selectStations platformSelectorType inputfileTimeBias filename variable {station} available name string used for parameter selection selectStations platformSelectorType estimateTimeBias parametrizationTemporalType [ms] name string parameters parameterSelectorType parameter to constrain sigma double sigma of the constraint (same unit as parameter) bias double constrain all selected parameters towards this value relativeToApriori boolean constrain only dx and not full x=dx+x0 parametrization slrParametrizationType', 'display_text': 'This class defines the models and parameters of the linearized observation equations for normal points (see SlrProcessing) \\[\\label{slrParametrizationType:model} 
\\M l - \\M f(\\M x_0) = \\left.\\frac{\\partial \\M f(\\M x)}{\\partial \\M x}\\right|_{\\M x_0} \\Delta\\M x + \\M\\epsilon, \\]where the left side is the observation vector minus the effects computed from the a priori models. After each least squares adjustment (see SlrProcessing:processingStep:estimate) the a priori parameters are updated \\[\\label{slrParametrizationType:update} \\M x_0 := \\M x_0 + \\Delta\\hat{\\M x}. \\]The vector $\\M x_0$ can be written with SlrProcessing:processingStep:writeAprioriSolution. Any outputfiles defined in the parametrizations are written with SlrProcessing:processingStep:writeResults.
Each parametrization (and possible constraint equations) has a name which enables activating/deactivating the estimation of subsets of $\\Delta\\M x$ with SlrProcessing:processingStep:selectParametrizations. The a priori model $\\M f(\\M x_0)$ is unaffected and is always reduced.
The model for the one way range observations between station $s$ and reflector $r$ can be described as \\[\\label{slrParametrizationType:slrFullModel} \\begin{split} f_s^r(\\M x) &= \\frac{1}{2}\\left(\\left\\lVert \\M r^r(t_{bounce})-\\M r_s(t_{trans}) \\right\\rVert + \\left\\lVert \\M r_s(t_{recv})-\\M r^r(t_{bounce}) \\right\\rVert\\right) \\\\ &+ \\text{troposphere}(t,\\M r_{ss}^r) + \\text{bias}^r + \\text{bias}_s + \\text{bias}_s^r + \\text{other}(\\ldots) + \\epsilon_r^s \\end{split} \\] See also SlrProcessing.
Troposphere
A priori tropospheric correction is handled by a troposphere model (e.g. Mendes and Pavlis). Additional parameters in $[m]$ for zenith delay can be set up via troposphereEstimation. These parameters can be soft-constrained using parametrization:constraints to avoid an unsolvable system of normal equations in case of data gaps.
The parameter names are <station>:troposphere:<temporal>:<interval>.
Estimates a static position for all selectStations in the terrestrial frame.
No-net constraints can be applied for a subset of stations, selectNoNetStations, with standard deviations of noNetTranslationSigma, noNetRotationSigma, and noNetScaleSigma. If the template inputfileNoNetPositions is provided the constraints are applied relative to these positions. Only stations with an existing position file are considered. Without inputfileNoNetPositions the constraints are applied towards the apriori values from SlrProcessing:station. As a single corrupted station position can disturb the no-net conditions, the rotation/translation parameters are estimated in a robust least squares adjustment beforehand. The computed weight matrix is used to downweight corrupted stations in the constraint equations.
In case you want to align to an ITRF/ILRS reference frame, precise coordinates can be generated with Sinex2StationPositions.
Constraints on the defined parameters can be added via parametrization:constraints. An example would be to set up estimateUT1:constant so the $dUT1$ parameter is included in the normal equation system. Since $dUT1$ cannot be determined by SLR, a hard constraint to its a priori value can then be added.
The parameter names are <station>:timeBias:<temporal>:<interval>.
Constraints
Add a pseudo observation equation (constraint) for each selected parameters \\[ b-x_0 = 1 \\cdot dx + \\epsilon, \\]where $b$ is the bias and $x_0$ is the a priori value of the parameter if relativeToApriori is not set. The standard deviation sigma is used to weight the observation equations.
Group
Groups a set of parameters. This class can be used to structure complex parametrizations and has no further effect itself.'},
'slrProcessingStepType': { 'name': 'slrProcessingStepType', 'key': 'slrProcessingStepType', 'description': 'Processing step in SlrProcessing . Processing steps enable a dynamic definition of the consecutive steps performed during any kind of SLR processing. The most common steps are , which performs an iterative least squares adjustment, and , which writes all output files defined in SlrProcessing and is usually the last step. Some steps such as and affect all subsequent steps. In case these steps are used within a step, they only affect the steps within this level. Iterative non-linear least squares adjustment. In every iteration it accumulates the system of normal equations, solves the system and updates the estimated parameters. The estimated parameters serve as a priori values in the next iteration and the following processing steps. Iterates until either every single parameter update (converted to an influence in meter) is below a convergenceThreshold or maxIterationCount is reached. With computeResiduals the observation equations are computed again after each update to compute the observation residuals. The overall standard deviation of a single observation used for the weighting is composed of several factors where the is given by :accuracy. The other factors are estimated iteratively from the residuals. With computeWeights a standardized variance for each residual is computed taking the redundancy into account. If is above a threshold huber the observation gets a higher standard deviation used for weighting according to similar to robust least squares adjustment . With adjustSigma0 an individual variance factor can be computed for each station separately In this step all outputfiles defined in are written. It considers the settings of and . It is usually the last processing step, but can also be used at other points in the processing in combination with suffix to write intermediate results. Accumulates the normal equations matrix and writes it. 
If is set only the selected parameters are written to the normal equations and all other parameters are eliminated beforehand (implicitly solved). The solution of the normals would results in (see ). To write the appropriate apriori vector use . Writes the current apriori vector (see ). If is set only the selected parameters are written. Writes the observation residuals for all . For for each station-satellite pair a file is written. The file name is interpreted as a template with the variables {station} and {satellite} being replaced by the station name. Writes a list of stations (stations) which are used in the last step and selected by . Writes a list of satellites which are used in the last step and selected by . Print residual statistics. station sigma redundancy obsCount outlier ---------------------------------------------- 1874 0.52 0.86 22 1 (4.55 1889 1.20 0.98 186 5 (2.69 1890 0.63 0.77 14 1 (7.14 1891 0.49 0.50 6 0 (0.00 7237 1.08 0.95 236 14 (5.93 7394 0.36 0.88 26 0 (0.00 7811 0.38 0.41 5 0 (0.00 7819 1.21 0.94 120 1 (0.83 7821 0.69 0.95 202 3 (1.49 7827 0.40 0.85 29 1 (3.45 7839 0.52 0.93 143 10 (6.99 7840 0.15 0.80 16 0 (0.00 7841 0.26 0.90 56 1 (1.79 7941 0.55 0.92 277 5 (1.81 8834 0.66 0.88 101 1 (0.99 ---------------------------------------------- satellite sigma redundancy obsCount outlier ---------------------------------------------- lageos1 1.04 0.94 722 24 (3.32 lageos2 0.91 0.95 590 11 (1.86 etalon1 1.19 0.78 57 2 (3.51 etalon2 1.10 0.81 70 6 (8.57 ---------------------------------------------- Enable/disable parameter groups and constraint groups for subsequent steps, e.g. or . The name and nameConstraint of these groups are defined in . Prior models or previously estimated parameters used as new apriori values are unaffected and they are always reduced from the observations. This means all unselected parameters are kept fixed to their last result. This step can be used to process only a subset of satellites in subsequent processing steps. 
This step can be used to process only a subset of stations in subsequent processing steps. Perform these processing steps. This step can be used to structure complex processing flows. The processing steps defined within a group only affect the steps within this group.', 'config_table': 'computeResiduals boolean adjustSigma0 boolean adjust sigma0 by scale factor (per station) computeWeights boolean downweight outliers huber double residuals > huber*sigma0 are downweighted huberPower double residuals > huber: sigma=(e/huber)^huberPower*sigma0 convergenceThreshold double [m] stop iteration once full convergence is reached maxIterationCount uint maximum number of iterations suffix string appended to every output file name (e.g. orbit.G01.suffix.dat) outputfileNormalEquations filename normals remainingParameters parameterSelectorType parameter order/selection of output normal equations constraintsOnly boolean write only normals of constraints without observations defaultNormalsBlockSize uint block size for distributing the normal equations, 0: one block, empty: original block size outputfileAprioriSolution filename a priori parameters outputfileParameterNames filename parameter names remainingParameters parameterSelectorType parameter order/selection of output normal equations selectStations platformSelectorType subset of used stations selectSatellites platformSelectorType subset of used satellites outputfileResiduals filename variable {station} available selectStations platformSelectorType subset of used stations outputfileUsedStationList filename ascii file with names of used stations selectSatellites platformSelectorType subset of used satellites outputfileUsedSatelliteList filename ascii file with names parametrization choice enable sequence name string wildcards: * and ? disable sequence name string wildcards: * and ? 
selectSatellites platformSelectorType selectStations platformSelectorType processingStep slrProcessingStepType steps are processed consecutively', 'display_text': 'Processing step in SlrProcessing.
Processing steps enable a dynamic definition of the consecutive steps performed during any kind of SLR processing. The most common steps are estimate, which performs an iterative least squares adjustment, and writeResults, which writes all output files defined in SlrProcessing and is usually the last step. Some steps such as selectParametrizations and selectStations affect all subsequent steps. In case these steps are used within a group step, they only affect the steps within this level.
Estimate
Iterative non-linear least squares adjustment. In every iteration it accumulates the system of normal equations, solves the system and updates the estimated parameters. The estimated parameters serve as a priori values in the next iteration and the following processing steps. Iterates until either every single parameter update (converted to an influence in meter) is below a convergenceThreshold or maxIterationCount is reached.
With computeResiduals the observation equations are computed again after each update to compute the observation residuals.
The overall standard deviation of a single observation used for the weighting is composed of several factors \\[ \\hat{\\sigma}_i = \\hat{\\sigma}_i^{huber} \\hat{\\sigma}^{stat} \\sigma_{apriori}^{stat}, \\]where the $\\sigma_{apriori}^{stat}$ is given by station:accuracy. The other factors are estimated iteratively from the residuals.
With computeWeights a standardized variance $\\hat{s}_i^2$ for each residual $\\hat{\\epsilon}_i$ is computed \\[ \\hat{s}_i^2 = \\frac{1}{\\hat{\\sigma}^{stat} \\sigma_{apriori}^{stat}}\\frac{\\hat{\\epsilon}_i^2}{r_i} \\qquad\\text{with}\\qquad r_i = \\left(\\M A\\left(\\M A^T\\M A\\right)^{-1}\\M A^T\\right)_{ii} \\]taking the redundancy $r_i$ into account. If $\\hat{s}_i$ is above a threshold huber the observation gets a higher standard deviation used for weighting according to \\[ \\hat{\\sigma}_i^{huber} = \\left\\{ \\begin{array}{ll} 1 & s < huber,\\\\ (\\hat{s}_i/huber)^{huberPower} & s \\ge huber \\end{array} \\right., \\]similar to robust least squares adjustment.
With adjustSigma0 an individual variance factor can be computed for each station separately \\[ \\hat{\\sigma}^{stat} = \\sqrt{\\frac{\\hat{\\M\\epsilon}^T\\M P\\hat{\\M\\epsilon}}{r}}. \\]
It is usually the last processing step, but can also be used at other points in the processing in combination with suffix to write intermediate results.
WriteNormalEquations
Accumulates the normal equations matrix and writes it. If remainingParameters is set only the selected parameters are written to the normal equations and all other parameters are eliminated beforehand (implicitly solved).
Writes the observation residuals for all selectStations. For each station-satellite pair a file is written. The file name is interpreted as a template with the variables {station} and {satellite} being replaced by the station and satellite names.
WriteUsedStationList
Writes a list of stations (stations) which are used in the last step and selected by selectStations.
WriteUsedSatelliteList
Writes a list of satellites which are used in the last step and selected by selectSatellites.
Enable/disable parameter groups and constraint groups for subsequent steps, e.g. processingStep:estimate or processingStep:writeResults. The name and nameConstraint of these groups are defined in parametrizations. Prior models or previously estimated parameters used as new apriori $\\M x_0$ values are unaffected and they are always reduced from the observations. This means all unselected parameters are kept fixed to their last result.
SelectSatellites
This step can be used to process only a subset of satellites in subsequent processing steps.
SelectStations
This step can be used to process only a subset of stations in subsequent processing steps.
Group
Perform these processing steps. This step can be used to structure complex processing flows. The select.. processing steps defined within a group only affect the steps within this group.'},
'slrSatelliteGeneratorType': { 'name': 'slrSatelliteGeneratorType', 'key': 'slrSatelliteGeneratorType', 'description': 'Definition and basic information of SLR satellites. See also SlrProcessing . A list of satellite names must be provided via . The other input files are read for each satellite, where the file name is interpreted as a template with the variable {satellite} being replaced by the satellite name from list. The contains information about laser retro-reflector, optical reference point, retro-reflector orientation, range corrections and center of mass. It can be created via PlatformCreate . If ist not provided an orbit reference frame (along, cross, nearly nadir) is assumed.', 'config_table': 'inputfileSatelliteList filename ascii file with satellite names, used to loop variable {satellite} inputfileSatelliteInfo filename variable {satellite} available inputfileOrbit filename variable {satellite} available inputfileAttitude filename variable {satellite} available interpolationDegree uint for orbit interpolation and velocity calculation', 'display_text': 'Definition and basic information of SLR satellites.
A list of satellite names must be provided via inputfileSatelliteList. The other input files are read for each satellite, where the file name is interpreted as a template with the variable {satellite} being replaced by the satellite name from the list. The inputfileSatelliteInfo contains information about laser retro-reflector, optical reference point, retro-reflector orientation, range corrections and center of mass. It can be created via PlatformCreate. If inputfileAttitude is not provided an orbit reference frame (along, cross, nearly nadir) is assumed.'},
'slrStationGeneratorType': { 'name': 'slrStationGeneratorType', 'key': 'slrStationGeneratorType', 'description': 'Definition and basic information of SLR ground stations. See also SlrProcessing . A list of station names must be provided via . It defines the variable {station} for the station specific input files. The contains metadata information like station number, station name and approximate station postion in terrestrial reference frame (TRF) considering the station eccentricities. They can be created via SinexEccentricties2SlrPlatform or PlatformCreate . The are separate files for each {station} - {satellite} pair. They can be converted from CRD format via Crd2NormalPoints , CSTG format via Cstg2NormalPoints and MERIT II format via Merit2NormalPoints and Merit2FullRate . The apriori observation weighting is defined by the expression accuracy in . The following variables are defined for each observation from the : {residual} , {accuracy} , {redundancy} , {laserWavelength} , {azimut} , {elevation} . Observations with non-positive accuracies are removed. This can be used for a rough outlier removal by an expression such as accuracy = if(abs(residual)>30, NAN, accuracy) . The effects of loading and tidal deformation on station positions can be corrected for via and , respectively. Tidal deformations typically include: : Earth tidal deformations (IERS conventions) : ocean tidal deformations (e.g. fes2014b n720, minDegree = 1 ) : atmospheric tidal deformation (e.g. 
AOD1B RL06, minDegree = 1 ) : pole tidal deformations (IERS conventions) : ocean pole tidal deformations (IERS conventions)', 'config_table': 'inputfileStationList filename ascii file with station names inputfileStationInfo filename station metadata inputfileStationPosition filename station position disableStationWithoutPosition boolean drop stations without apriori position inputfileObservations filename variable {station} {satellite} available accuracy expression [m] used for weighting, variables: {residual}, {accuracy}, {redundancy}, {laserWavelength}, {azimut}, {elevation} loadingDisplacement gravityfieldType loading deformation tidalDisplacement tidesType tidal deformation ephemerides ephemeridesType for tidal deformation inputfileDeformationLoadLoveNumber filename inputfilePotentialLoadLoveNumber filename if full potential is given and not only loading potential elevationCutOff angle [degree] ignore observations below cutoff interpolationDegree uint for position interpolation', 'display_text': 'Definition and basic information of SLR ground stations.
The apriori observation weighting is defined by the expression accuracy in $[m]$. The following variables are defined for each observation from the inputfileObservations: {residual}, {accuracy}, {redundancy}, {laserWavelength}, {azimut}, {elevation}. Observations with non-positive accuracies are removed. This can be used for a rough outlier removal by an expression such as accuracy = if(abs(residual)>30, NAN, accuracy).
The effects of loading and tidal deformation on station positions can be corrected for via loadingDisplacement and tidalDisplacement, respectively. Tidal deformations typically include:
poleTide: pole tidal deformations (IERS conventions)
poleOceanTide: ocean pole tidal deformations (IERS conventions)
'},
'sphericalHarmonicsFilterType': { 'name': 'sphericalHarmonicsFilterType', 'key': 'sphericalHarmonicsFilterType', 'description': 'Filtering of a spherical harmonics expansion. Orderwise filtering with the DDK filter by Kusche et al. 2009. Filtering the spherical harmonics expansion with a Gaussian filter. radius gives the filter radius on the Earth surface in km. Filtering the spherical harmonics expansion with a matrix filter.', 'config_table': 'level uint DDK filter level (1, 2, 3, ..., 8) inputfileNormalEquation filename radius double filter radius [km] inputfileMatrix filename minDegree uint of matrix maxDegree uint of matrix numbering sphericalHarmonicsNumberingType numbering scheme of the matrix', 'display_text': 'Filtering of a spherical harmonics expansion.
DDK
Orderwise filtering with the DDK filter by Kusche et al. 2009.
Gauss
Filtering the spherical harmonics expansion with a Gaussian filter. radius gives the filter radius on the Earth surface in km.
Matrix
Filtering the spherical harmonics expansion with a matrix filter.'},
'sphericalHarmonicsNumberingType': { 'name': 'sphericalHarmonicsNumberingType', 'key': 'sphericalHarmonicsNumberingType', 'description': 'This class organizes the numbering scheme of spherical harmonics coefficients in a parameter vector (e.g Gravityfield2SphericalHarmonicsVector and the design matrix of . Numbering degree by degree: Numbering order by order: Numbering order by order with cnm, snm non-alternating: Numbering as specified in the chosen file. The is a matrix with the first column indicating cnm/snm with 0 or 1. The second and third column specify degree and order.', 'config_table': 'inputfile filename', 'display_text': 'This class organizes the numbering scheme of spherical harmonics coefficients in a parameter vector (e.g Gravityfield2SphericalHarmonicsVector and the design matrix of parametrizationGravity:sphericalHarmoncis.
Numbering order by order: \\[ c20, c30, c40, \\ldots, c21, s21, c31, s31, \\ldots, c22, s22 \\]
OrderNonAlternating
Numbering order by order with cnm, snm non-alternating: \\[ c20, c30, c40, \\ldots, c21, c31, c41, \\ldots, s21, s31, s41, \\ldots \\]
File
Numbering as specified in the chosen file. The inputfile is a matrix with the first column indicating cnm/snm with 0 or 1. The second and third column specify degree and order.'},
'sstRightSideType': { 'name': 'sstRightSideType', 'key': 'sstRightSideType', 'description': 'Observation vector for GRACE like data (satellite-tracking and precise orbit data (POD)) of equations in a least squares adjustment. The observations are reduced by the effect of and (observed minus computed).', 'config_table': 'sstRightSideType sequence inputfileSatelliteTracking filename ranging observations and corrections inputfileOrbit1 filename kinematic positions of satellite A as observations inputfileOrbit2 filename kinematic positions of satellite B as observations inputfileAccelerometer1 filename non-gravitational forces in satellite reference frame A inputfileAccelerometer2 filename non-gravitational forces in satellite reference frame B forces forcesType', 'display_text': 'Observation vector for GRACE like data (satellite-tracking and precise orbit data (POD)) of observation equations in a least squares adjustment. The observations are reduced by the effect of inputfileAccelerometer and forces (observed minus computed).'},
'thermosphereType': { 'name': 'thermosphereType', 'key': 'thermosphereType', 'description': 'This class provides functions for calculating the density, temperature and velocity in the thermosphere. The wind is computed by HWM14 model if hwm14DataDirectory is provided. A quiet thermosphere is assumed if inputfileMagnetic3hAp is not given. Thermosphere parameters from the JB2008 model: Bowman, B. R., Tobiska, W. K., Marcos, F. A., Huang, C. Y., Lin, C. S., Burke, W. J. (2008). A new empirical thermospheric density model JB2008 using new solar and geomagnetic indices. In AIAA/AAS Astrodynamics Specialist Conference and Exhibit. https://doi.org/10.2514/6.2008-6438 Thermosphere parameters from the NRLMSIS2 model: Emmert J.D, D.P.Drob, J.M. Picone, et al. (2020), NRLMSIS 2.0: A whole-atmosphere empirical model of temperature and neutral species densities. Earth and Space Science, Volume 8, 3 https://doi.org/10.1029/2020EA001321', 'config_table': 'inputfileSolfsmy filename solar indices inputfileDtc filename inputfileMagnetic3hAp filename indicies for wind model hwm14DataDirectory filename directory containing dwm07b104i.dat, gd2qd.dat, hwm123114.bin inputfileMsis filename input NRLMSIS 2.0 inputfileModelParameters filename path to msis20.parm file inputfileMagnetic3hAp filename indicies for wind model hwm14DataDirectory filename directory containing dwm07b104i.dat, gd2qd.dat, hwm123114.bin', 'display_text': 'This class provides functions for calculating the density, temperature and velocity in the thermosphere. The wind is computed by HWM14 model if hwm14DataDirectory is provided. A quiet thermosphere is assumed if inputfileMagnetic3hAp is not given.
JB2008
Thermosphere parameters from the JB2008 model:
Bowman, B. R., Tobiska, W. K., Marcos, F. A., Huang, C. Y., Lin, C. S., Burke, W. J. (2008). A new empirical thermospheric density model JB2008 using new solar and geomagnetic indices. In AIAA/AAS Astrodynamics Specialist Conference and Exhibit. https://doi.org/10.2514/6.2008-6438
NRLMSIS2
Thermosphere parameters from the NRLMSIS2 model:
Emmert J.D, D.P.Drob, J.M. Picone, et al. (2020), NRLMSIS 2.0: A whole-atmosphere empirical model of temperature and neutral species densities. Earth and Space Science, Volume 8, 3 https://doi.org/10.1029/2020EA001321'},
'tidesType': { 'name': 'tidesType', 'key': 'tidesType', 'description': 'This class computes functionals of the time depending tide potential, e.g potential, acceleration or gravity gradients. If several instances of the class are given the results are summed up. Before summation every single result is multiplicated by a factor . To get the difference between two ocean tide models you must choose one factor by 1 and the other by -1. To get the mean of two models just set each factor to 0.5. This class computes the tide generating potential (TGP) of sun, moon and planets (Mercury, Venus, Mars, Jupiter, Saturn). It takes into account the flattening of the Earth (At the moment only at the acceleration level). The computed result is multiplied with factor . This class computes the earth tide according to the IERS2003 conventions. The values of solid Earth tide external potential Love numbers and the frequency dependent corrections of these values are given in the file . The effect of the permanent tide is removed if includePermanentTide is set to false. The computed result is multiplied with factor . The potential coefficients of the solid Earth pole tide according to the IERS2003 conventions are given by with is the scale , is the outPhase and are the wobble variables in seconds of arc. They are related to the polar motion variables according to The mean pole is approximated by a polynomial read from . The displacment is calculated with where is the horizontalDisplacement and is the verticalDisplacement . The computed result is multiplied with factor . The ocean pole tide is generated by the centrifugal effect of polar motion on the oceans. The potential coefficients of this effect is given by IERS2003 conventions are given by where the coefficients are read from file , is given by gammaReal and gammaImaginary and are the wobble variables in radians. They are related to the polar motion variables according to The mean pole is approximated by a polynomial read from . 
The computed result is multiplied with factor . The time variable potential of ocean tides is given by a fourier expansion where and are spherical harmonics expansions and are read from the file . If set the expansion is limited in the range between minDegree and maxDegree inclusivly. are the arguments of the tide constituents : where are the Doodson\'s fundamental arguments ( ) and are the Doodson multipliers for the term at frequency . The major constituents given by can be used to interpolate minor tidal constituents using the file . This file can be created with DoodsonHarmonicsCalculateAdmittance . After the interpolation step a selection of the computed constituents can be choosen by . Only these constiuents are considered for the results. If no is set all constituents will be used. The constituents can be coded as Doodson number (e.g. 255.555) or as names intoduced by Darwin (e.g. M2). The computed result is multiplied with factor . Computes the centrifugal potential in a rotating system The current rotation vector is computed from the provided by the calling program. The computed result is multiplied with factor . Be careful, the centrifugal potential is not harmonic. Convolution with a harmonic kernel (e.g. to compute gravity anomalies) is not meaningful. This class computes the solid moon tide according to the IERS2010 conventions. The values of solid Moon tide external potential Love numbers are given and there are no frequency dependent corrections of these values. The computed result is multiplied with factor . 
Groups a set of and has no further effect itself.', 'config_table': 'useMoon boolean TGP of moon useSun boolean TGP of sun usePlanets boolean TGP of planets useEarth boolean TGP of Earth c20Earth double J2 flattening of the Earth factor double the result is multiplied by this factor, set -1 to subtract the field inputfileEarthtide filename includePermanentTide boolean results in FALSE: zero tide, TRUE: tide free gravity field factor double the result is multiplied by this factor, set -1 to subtract the field scale double outPhase double inputfileMeanPole filename horizontalDisplacement double [m] verticalDisplacement double [m] factor double the result is multiplied by this factor, set -1 to subtract the field inputfileOceanPole filename minDegree uint maxDegree uint gammaReal double gammaImaginary double inputfileMeanPole filename factor double the result is multiplied by this factor, set -1 to subtract the field inputfileTides filename inputfileAdmittance filename interpolation of minor constituents selectDoodson doodson consider only these constituents, code number (e.g. 255.555) or darwin name (e.g. M2) minDegree uint maxDegree uint nodeCorr uint nodal corrections: 0-no corr, 1-IHO, 2-Schureman factor double the result is multiplied by this factor, set -1 to subtract the field factor double the result is multiplied by this factor, set -1 to subtract the field k20 double k30 double factor double the result is multiplied by this factor, set -1 to subtract the field tides tidesType factor double the result is multiplied by this factor', 'display_text': 'This class computes functionals of the time depending tide potential, e.g potential, acceleration or gravity gradients.
If several instances of the class are given the results are summed up. Before summation every single result is multiplied by a factor. To get the difference between two ocean tide models you must set one factor to 1 and the other to -1. To get the mean of two models just set each factor to 0.5.
AstronomicalTide
This class computes the tide generating potential (TGP) of sun, moon and planets (Mercury, Venus, Mars, Jupiter, Saturn). It takes into account the flattening of the Earth (At the moment only at the acceleration level).
The computed result is multiplied with factor.
EarthTide
This class computes the earth tide according to the IERS2003 conventions. The values of solid Earth tide external potential Love numbers and the frequency dependent corrections of these values are given in the file inputfileEarthtide. The effect of the permanent tide is removed if includePermanentTide is set to false.
The computed result is multiplied with factor.
PoleTide
The potential coefficients of the solid Earth pole tide according to the IERS2003 conventions are given by \\[ \\begin{split} \\Delta c_{21} &= s\\cdot(m_1 + o\\cdot m_2), \\\\ \\Delta s_{21} &= s\\cdot(m_2 - o\\cdot m_1), \\end{split} \\]where $s$ is the scale, $o$ is the outPhase and $(m_1,m_2)$ are the wobble variables in seconds of arc. They are related to the polar motion variables $(x_p,y_p)$ according to \\[ \\begin{split} m_1 &= (x_p - \\bar{x}_p), \\\\ m_2 &= -(y_p - \\bar{y}_p), \\end{split} \\]The mean pole $(\\bar{x}_p, \\bar{y}_p)$ is approximated by a polynomial read from inputfileMeanPole.
The displacement is calculated with \\[ \\begin{split} S_r &= -v\\sin2\\vartheta(m_1\\cos\\lambda+m_2\\sin\\lambda),\\\\ S_\\vartheta &= -h\\cos2\\vartheta(m_1\\cos\\lambda+m_2\\sin\\lambda),\\\\ S_\\lambda &= h\\cos\\vartheta(m_1\\sin\\lambda-m_2\\cos\\lambda), \\end{split} \\]where $h$ is the horizontalDisplacement and $v$ is the verticalDisplacement.
The computed result is multiplied with factor.
OceanPoleTide
The ocean pole tide is generated by the centrifugal effect of polar motion on the oceans. The potential coefficients of this effect according to the IERS2003 conventions are given by \\[ \\begin{Bmatrix} \\Delta c_{nm} \\\\ \\Delta s_{nm} \\end{Bmatrix}= \\begin{Bmatrix} c_{nm}^R \\\\ s_{nm}^R \\end{Bmatrix} (m_1\\gamma^R+m_2\\gamma^I)+ \\begin{Bmatrix} c_{nm}^I \\\\ s_{nm}^I \\end{Bmatrix} (m_2\\gamma^R-m_1\\gamma^I) \\]where the coefficients are read from file inputfileOceanPole, $\\gamma=\\gamma^R+i\\gamma^I$ is given by gammaReal and gammaImaginary and $(m_1,m_2)$ are the wobble variables in radians. They are related to the polar motion variables $(x_p,y_p)$ according to \\[ \\begin{split} m_1 &= (x_p - \\bar{x}_p), \\\\ m_2 &= -(y_p - \\bar{y}_p), \\end{split} \\]The mean pole $(\\bar{x}_p, \\bar{y}_p)$ is approximated by a polynomial read from inputfileMeanPole.
The computed result is multiplied with factor.
DoodsonHarmonicTide
The time variable potential of ocean tides is given by a Fourier expansion \\[ V(\\M x,t) = \\sum_{f} V_f^c(\\M x)\\cos(\\Theta_f(t)) + V_f^s(\\M x)\\sin(\\Theta_f(t)), \\]where $V_f^c(\\M x)$ and $V_f^s(\\M x)$ are spherical harmonics expansions and are read from the file inputfileDoodsonHarmonic. If set the expansion is limited in the range between minDegree and maxDegree inclusively. $\\Theta_f(t)$ are the arguments of the tide constituents $f$: \\[ \\Theta_f(t) = \\sum_{i=1}^6 n_f^i\\beta_i(t), \\]where $\\beta_i(t)$ are the Doodson\'s fundamental arguments ($\\tau,s,h,p,N\',p_s$) and $n_f^i$ are the Doodson multipliers for the term at frequency $f$.
After the interpolation step a selection of the computed constituents can be chosen by selectDoodson. Only these constituents are considered for the results. If no selectDoodson is set all constituents will be used. The constituents can be coded as Doodson number (e.g. 255.555) or as names introduced by Darwin (e.g. M2).
The computed result is multiplied with factor.
Centrifugal
Computes the centrifugal potential in a rotating system \\[ V(\\M r, t) = \\frac{1}{2} (\\M\\omega(t)\\times\\M r)^2. \\]The current rotation vector $\\M\\omega(t)$ is computed from the earthRotation provided by the calling program. The computed result is multiplied with factor.
Be careful, the centrifugal potential is not harmonic. Convolution with a harmonic kernel (e.g. to compute gravity anomalies) is not meaningful.
SolidMoonTide
This class computes the solid moon tide according to the IERS2010 conventions. The values of solid Moon tide external potential Love numbers are given and there are no frequency dependent corrections of these values. The computed result is multiplied with factor.
Group
Groups a set of tides and has no further effect itself.'},
'timeSeriesType': { 'name': 'timeSeriesType', 'key': 'timeSeriesType', 'description': 'This class generates a series of points in time. The series is always sorted in ascending order. Depending of the application the series is interpreted as list of points or as intervals between the points. Generates a time series with uniform sampling. The first point in time will be timeStart . The last generated point in time will be less or equal timeEnd . The time step between generated points in time is given by sampling . Generates a time series with uniform sampling between timeStart and timeEnd . intervallCount gives the count of intervals. This class generates count+1 points in time inclusive timeStart and timeEnd . The points of the time series are given explicitly with time . If useMonthMiddle is set, time points are generated at mid of each month inclusively the monthStart in yearStart and monthEnd in yearEnd . Otherwise times are given at the first of each month and a time point after the last month. If useYearMiddle is set, time points are generated at mid of each year inclusively yearStart and yearEnd . Otherwise times are given at the first of each year and a time point after the last year. Generates a time series with monthly sampling. The first point in time will be timeStart and the following points are generated for each month at the same day and time in month. The last generated point in time will be less or equal timeEnd . Generates a time series with yearly sampling. The first point in time will be timeStart and the following points are generated for each year at the same day and time in year. The last generated point in time will be less or equal timeEnd . Read a time series (epochs) from an instrument file . The time series can be restricted to the interval starting from timeStart and before timeEnd . Reconstruct a time series from an instrument file . 
The time series is the first epoch of each arc plus one time step beyond the last epoch of the last arc (using median sampling). Reads an orbit file and create a time stamp for each ascending equator crossing. The time series can be restricted to the interval starting from timeStart and before timeEnd . In a first step a is generated. In a second step all times are removed which are in range before or after excludeMargin seconds of the times given by . Only times for which the is met are included in the time series. The variableLoopTime is set to every time and the is evaluated. Interpolates nodeInterpolation count points between the given uniformly.', 'config_table': 'timeStart time first point in time timeEnd time last point in time will be less or equal timeEnd sampling time time step between points in time timeStart time 1st point of the time series timeEnd time last point of the time series intervalCount uint count of intervals, count+1 points in time will generated time time explicit list of points in time monthStart uint yearStart uint monthEnd uint yearEnd uint useMonthMiddle boolean time points are mid of months, otherwise the 1st of each month + a time point behind the last month yearStart uint yearEnd uint useYearMiddle boolean time points are mid of years, otherwise the 1st of each year + a time point behind the last year timeStart time first point in time timeEnd time last point in time will be less or equal timeEnd timeStart time first point in time timeEnd time last point in time will be less or equal timeEnd inputfileInstrument filename timeStart time inclusive, i.e. exclude eochs before this epoch timeEnd time exclusive, i.e. only epochs before this time are used inputfileInstrument filename Must be regular. Time series is first epoch of each arc plus one time step extrapolated from last epoch of last arc. 
inputfileOrbit filename timeStart time exclude eochs before this epoch timeEnd time only epochs before this time are used timeSeries timeSeriesType time series to be created timeSeriesExclude timeSeriesType exclude this time points from time series (within margin) excludeMargin double on both sides [seconds] timeSeries timeSeriesType only times for which condition is met will be included variableLoopTime string variable with time of each loop condition conditionType test for each time timeSeries timeSeriesType time series to be created nodeInterpolation uint interpolates count points in each time interval given by the time series', 'display_text': 'This class generates a series of points in time. The series is always sorted in ascending order. Depending of the application the series is interpreted as list of points or as intervals between the points.
UniformSampling
Generates a time series with uniform sampling. The first point in time will be timeStart. The last generated point in time will be less or equal timeEnd. The time step between generated points in time is given by sampling.
UniformInterval
Generates a time series with uniform sampling between timeStart and timeEnd. intervalCount gives the count of intervals. This class generates count+1 points in time including timeStart and timeEnd.
Irregular
The points of the time series are given explicitly with time.
Monthly
If useMonthMiddle is set, time points are generated at mid of each month inclusively the monthStart in yearStart and monthEnd in yearEnd. Otherwise times are given at the first of each month and a time point after the last month.
Yearly
If useYearMiddle is set, time points are generated at mid of each year inclusively yearStart and yearEnd. Otherwise times are given at the first of each year and a time point after the last year.
EveryMonth
Generates a time series with monthly sampling. The first point in time will be timeStart and the following points are generated for each month at the same day and time in month. The last generated point in time will be less or equal timeEnd.
EveryYear
Generates a time series with yearly sampling. The first point in time will be timeStart and the following points are generated for each year at the same day and time in year. The last generated point in time will be less or equal timeEnd.
Instrument
Read a time series (epochs) from an instrument file. The time series can be restricted to the interval starting from timeStart and before timeEnd.
InstrumentArcIntervals
Reconstruct a time series from an instrument file. The time series is the first epoch of each arc plus one time step beyond the last epoch of the last arc (using median sampling).
Revolution
Reads an orbit file and creates a time stamp for each ascending equator crossing. The time series can be restricted to the interval starting from timeStart and before timeEnd.
Exclude
In a first step a timeSeries is generated. In a second step all times are removed which are in range before or after excludeMargin seconds of the times given by timeSeriesExclude.
Conditional
Only times for which the condition is met are included in the time series. The variableLoopTime is set to every time and the condition is evaluated.
Interpolate
Interpolates nodeInterpolation count points between the given timeSeries uniformly.'},
'troposphereType': { 'name': 'troposphereType', 'key': 'troposphereType', 'description': 'This class provides functions for calculating and estimating the signal delay in the dry and wet atmosphere. Tropospheric delays based on the Vienna Mapping Functions 3 (VMF3) model (Landskron and Boehm 2017, DOI: ). Hydrostatic and wet mapping function coefficients ( , ) and zenith delays (ZHD, ZWD) have to be provided via . This file can contain either station-specific data (see ViennaMappingFunctionStation2File ) or data on a regular global grid (see ViennaMappingFunctionGrid2File ). In the second case mapping coefficients and zenith delays are interpolated to the requested coordinates. This includes a height correction that requires approximate meteorological data provided via . Tropospheric delays based on the Global Pressure and Temperature 3 (GPT3) model (Landskron and Boehm 2017, DOI: ). It is an empirical model derived from the Vienna Mapping Functions 3 (VMF3, see ) and thus does not require additional mapping coefficients and zenith delay values. Tropospheric delays based on the Mendes-Pavlis model that employs meteorological data. (Mendes et al. (2002), and Mendes and Pavlis (2004), ) The meteorological data have to be provided via . 
This file contains the temperature, air pressure and humidity and must be first generated using the programs Crd2NormalPoints , Cstg2NormalPoints , Merit2NormalPoints or Merit2FullRate .', 'config_table': 'inputfileVmfCoefficients filename ah, aw, zhd, zwd coefficients inputfileGpt filename gridded GPT data aHeight double parameter a (height correction) bHeight double parameter b (height correction) cHeight double parameter c (height correction) inputfileGpt filename gridded GPT data aHeight double parameter a (height correction) bHeight double parameter b (height correction) cHeight double parameter c (height correction) inputfileStationMeteorology filename', 'display_text': 'This class provides functions for calculating and estimating the signal delay in the dry and wet atmosphere.
ViennaMapping
Tropospheric delays based on the Vienna Mapping Functions 3 (VMF3) model (Landskron and Boehm 2017, DOI: 10.1007/s00190-017-1066-2).
Hydrostatic and wet mapping function coefficients ($a_h$, $a_w$) and zenith delays (ZHD, ZWD) have to be provided via inputfileVmfCoefficients. This file can contain either station-specific data (see ViennaMappingFunctionStation2File) or data on a regular global grid (see ViennaMappingFunctionGrid2File). In the second case mapping coefficients and zenith delays are interpolated to the requested coordinates. This includes a height correction that requires approximate meteorological data provided via inputfileGpt.
GPT
Tropospheric delays based on the Global Pressure and Temperature 3 (GPT3) model (Landskron and Boehm 2017, DOI: 10.1007/s00190-017-1066-2).
It is an empirical model derived from the Vienna Mapping Functions 3 (VMF3, see viennaMapping) and thus does not require additional mapping coefficients and zenith delay values.
MendesAndPavlis
Tropospheric delays based on the Mendes-Pavlis model that employs meteorological data. (Mendes et al. (2002), 10.1029/2001GL014394 and Mendes and Pavlis (2004), 10.1029/2004GL020308)
The meteorological data have to be provided via inputfileStationMeteorology. This file contains the temperature, air pressure and humidity and must be first generated using the programs Crd2NormalPoints, Cstg2NormalPoints, Merit2NormalPoints or Merit2FullRate.'},
'general.fileFormat': { 'name': 'general.fileFormat', 'key': 'general.fileFormat', 'description': 'All GROOPS files are written either in XML, JSON, binary, or ASCII format depending on the filename extension. .xml : XML format .json : JSON format .dat : binary format .txt and all other extensions: ASCII format With an additional extension of \' .gz \' files are directly compressed and uncompressed. It is also possible to directly uncompress and read (but not write) \'d files (\' .Z \'). Comments are allowed in ASCII files and all the text starting from the character \' # \' to the end of the line is ignored. The program FileConvert can be used to convert between the different formats. This program is also useful to get some general information of files in binary format. The following special file types are used in GROOPS:', 'config_table': '', 'display_text': 'All GROOPS files are written either in XML, JSON, binary, or ASCII format depending on the filename extension.
.xml: XML format
.json: JSON format
.dat: binary format
.txt and all other extensions: ASCII format
With an additional extension of \'.gz\' files are directly compressed and uncompressed. It is also possible to directly uncompress and read (but not write) Unix compress\'d files (\'.Z\').
Comments are allowed in ASCII files and all the text starting from the character \'#\' to the end of the line is ignored.
The program FileConvert can be used to convert between the different formats. This program is also useful to get some general information of files in binary format.
The following special file types are used in GROOPS:'},
'fileFormat_admittance': { 'name': 'Admittance', 'key': 'fileFormat_admittance', 'description': 'Interpolation matrix to create ocean minor tides from modeled major tides. The file can be created with DoodsonHarmonicsCalculateAdmittance and used e.g. in Interpolation matrix to create ocean minor tides from modeled major tides. The file can be created with . See DoodsonHarmonicsCalculateAdmittance .', 'config_table': '', 'display_text': 'Interpolation matrix to create ocean minor tides from modeled major tides. The file can be created with DoodsonHarmonicsCalculateAdmittance and used e.g. in doodsonHarmonicTide.
See DoodsonHarmonicsCalculateAdmittance.'},
'fileFormat_arcList': { 'name': 'ArcList', 'key': 'fileFormat_arcList', 'description': 'With the InstrumentSynchronize an instrument file can be divided into time intervals and within the intervals into arcs. This file provides the information about the mapping of arcs to time intervals. This file can be used for the variational equation approach or KalmanBuildNormals . groops arclist version=20200123 32 # number of times # time [MJD] first arc # ================================== 58909.000000000000000000 0 58910.000000000000000000 8 58911.000000000000000000 17 58912.000000000000000000 25 58913.000000000000000000 29 58914.000000000000000000 37 58915.000000000000000000 45 58916.000000000000000000 53 58917.000000000000000000 61 58918.000000000000000000 69 58919.000000000000000000 78 58920.000000000000000000 86 58921.000000000000000000 95 58922.000000000000000000 103 58923.000000000000000000 112 58924.000000000000000000 120 58925.000000000000000000 128 58926.000000000000000000 136 58927.000000000000000000 144 58928.000000000000000000 153 58929.000000000000000000 161 58930.000000000000000000 169 58931.000000000000000000 177 58932.000000000000000000 185 58933.000000000000000000 193 58934.000000000000000000 201 58935.000000000000000000 210 58936.000000000000000000 218 58937.000000000000000000 226 58938.000000000000000000 234 58939.000000000000000000 242 58940.000000000000000000 250', 'config_table': '', 'display_text': 'With the InstrumentSynchronize an instrument file can be divided into time intervals and within the intervals into arcs. This file provides the information about the mapping of arcs to time intervals.
This file can be used for the variational equation approach or KalmanBuildNormals.
'},
'fileFormat_doodsonHarmonic': { 'name': 'DoodsonHarmonic', 'key': 'fileFormat_doodsonHarmonic', 'description': 'Ocean tides are represented as time variable gravitational potential and is given by a fourier expansion where and are spherical harmonics. The are the arguments of the tide constituents : where are the Doodson\'s fundamental arguments ( ) and are the Doodson multipliers for the term at frequency . To extract the potential coefficients of and for each frequency use DoodsonHarmonics2PotentialCoefficients . See also PotentialCoefficients2DoodsonHarmonics .', 'config_table': '', 'display_text': 'Ocean tides are represented as time variable gravitational potential and is given by a fourier expansion \\[ V(\\M x,t) = \\sum_{f} V_f^c(\\M x)\\cos(\\Theta_f(t)) + V_f^s(\\M x)\\sin(\\Theta_f(t)), \\]where $V_f^c(\\M x)$ and $V_f^s(\\M x)$ are spherical harmonics. The $\\Theta_f(t)$ are the arguments of the tide constituents $f$: \\[ \\Theta_f(t) = \\sum_{i=1}^6 n_f^i\\beta_i(t), \\]where $\\beta_i(t)$ are the Doodson\'s fundamental arguments ($\\tau,s,h,p,N\',p_s$) and $n_f^i$ are the Doodson multipliers for the term at frequency $f$.
'},
'fileFormat_earthTide': { 'name': 'EarthTide', 'key': 'fileFormat_earthTide', 'description': 'Containing the Love numbers together with frequency corrections to compute the gravitational potential and the geometric displacements due to solid Earth tides. It is used by Containing the Love numbers together with frequency corrections to compute the gravitational potential and the geometric displacements due to solid Earth tides. It is used by .', 'config_table': '', 'display_text': 'Containing the Love numbers together with frequency corrections to compute the gravitational potential and the geometric displacements due to solid Earth tides. It is used by tides.'},
'fileFormat_ephemerides': { 'name': 'Ephemerides', 'key': 'fileFormat_ephemerides', 'description': 'Ephemerides of sun, moon, and planets stored as coefficients of Chebyshev polynomials. Used in Ephemerides of sun, moon, and planets stored as coefficients of Chebyshev polynomials. Used in . See also JplAscii2Ephemerides .', 'config_table': '', 'display_text': 'Ephemerides of sun, moon, and planets stored as coefficients of Chebyshev polynomials. Used in Ephemerides:jpl.
See also JplAscii2Ephemerides.'},
'fileFormat_gnssAntennaDefinition': { 'name': 'GnssAntennaDefinition', 'key': 'fileFormat_gnssAntennaDefinition', 'description': 'Contains a list of GNSS antennas which are identified by its name (type), serial, and radome. Each antenna consists of antenna center offsets (ACO) and antenna center variations (ACV) for different signal Contains a list of GNSS antennas which are identified by its name (type), serial, and radome. Each antenna consists of antenna center offsets (ACO) and antenna center variations (ACV) for different signal (code and phase). The ACV values for each type are stored in an elevation and azimuth dependent grid. See also GnssAntennaDefinitionCreate , GnssAntex2AntennaDefinition . <?xml version="1.0" encoding="UTF-8"?> <groops type="antennaDefinition" version="20190429"> <antennaCount>65</antennaCount> ... <antenna> <name>BLOCK IIIA</name> <serial>G074</serial> <radome>2018-109A</radome> <comment>PCO provided by the Aerospace Corporation, PV from estimations by ESA/CODE</comment> <pattern> <count>3</count> <cell> <type>*1*G**</type> <offset> <x>-1.23333333333333e-03</x> <y>4.33333333333333e-04</y> <z>3.15200000000000e-01</z> </offset> <dZenit>1.00000000000000e+00</dZenit> <pattern> <type>0</type> <rows>1</rows> <columns>18</columns> <cell row="0" col="0">1.39000000000000e-02</cell> <cell row="0" col="1">1.28000000000000e-02</cell> <cell row="0" col="2">1.02000000000000e-02</cell> <cell row="0" col="3">5.80000000000000e-03</cell> <cell row="0" col="4">1.10000000000000e-03</cell> <cell row="0" col="5">-4.50000000000000e-03</cell> <cell row="0" col="6">-9.70000000000000e-03</cell> <cell row="0" col="7">-1.28000000000000e-02</cell> <cell row="0" col="8">-1.34000000000000e-02</cell> <cell row="0" col="9">-1.18000000000000e-02</cell> <cell row="0" col="10">-8.90000000000000e-03</cell> <cell row="0" col="11">-4.50000000000000e-03</cell> <cell row="0" col="12">1.20000000000000e-03</cell> <cell row="0" col="13">7.20000000000000e-03</cell> <cell row="0" 
col="14">1.33000000000000e-02</cell> <cell row="0" col="15">1.33000000000000e-02</cell> <cell row="0" col="16">1.33000000000000e-02</cell> <cell row="0" col="17">1.33000000000000e-02</cell> </pattern> </cell> ... </pattern> </antenna> </groops>', 'config_table': '', 'display_text': 'Contains a list of GNSS antennas which are identified by its name (type), serial, and radome. Each antenna consists of antenna center offsets (ACO) and antenna center variations (ACV) for different signal types (code and phase). The ACV values for each type are stored in an elevation and azimuth dependent grid.
'},
'fileFormat_gnssReceiverDefinition': { 'name': 'GnssReceiverDefinition', 'key': 'fileFormat_gnssReceiverDefinition', 'description': 'Contains a list of GNSS receivers which are identified by its name, serial, and version. Defines for each receiver a list of signal Contains a list of GNSS receivers which are identified by its name, serial, and version. Defines for each receiver a list of signal which can be observed. Can also be used for GNSS transmitters to define a list of transmitted signal types. For GLONASS satellites the frequency number can be stored in the version field. See GnssReceiverDefinitionCreate . <?xml version="1.0" encoding="UTF-8"?> <groops type="receiverDefinition" version="20190429"> <receiverCount>112</receiverCount> <receiver> <name>GLONASS</name> <serial>R779</serial> <version>2</version> <comment/> <types> <count>4</count> <cell>*1CR**J</cell> <cell>*1PR**J</cell> <cell>*2CR**J</cell> <cell>*2PR**J</cell> </types> </receiver> ... <receiver> <name>GLONASS-K1</name> <serial>R802</serial> <version>7</version> <comment/> <types> <count>10</count> <cell>*1CR**O</cell> <cell>*1PR**O</cell> <cell>*2CR**O</cell> <cell>*2PR**O</cell> <cell>*3IR**</cell> <cell>*3QR**</cell> <cell>*4AR**</cell> <cell>*4BR**</cell> <cell>*6AR**</cell> <cell>*6BR**</cell> </types> </receiver> </groops>', 'config_table': '', 'display_text': 'Contains a list of GNSS receivers which are identified by its name, serial, and version. Defines for each receiver a list of signal types which can be observed. Can also be used for GNSS transmitters to define a list of transmitted signal types. For GLONASS satellites the frequency number can be stored in the version field.
'},
'fileFormat_gnssSignalBias': { 'name': 'GnssSignalBias', 'key': 'fileFormat_gnssSignalBias', 'description': 'Signal biases of GNSS transmitters or receivers for different Signal biases of GNSS transmitters or receivers for different . groops gnssSignalBias version=20200123 5 # number of signals # type bias [m] # =============================== C1CG06 -1.752461109688110974e-01 C1WG06 4.005884595055994590e-02 C2WG06 6.597469378913034532e-02 L1*G06 -2.736169875580296909e-02 L2*G06 3.422596762686257871e-02 See also GnssProcessing , GnssSimulateReceiver , GnssSignalBias2Matrix , GnssSignalBias2SinexBias .', 'config_table': '', 'display_text': 'Signal biases of GNSS transmitters or receivers for different gnssType.
groops gnssSignalBias version=20200123 5 # number of signals # type bias [m] # =============================== C1CG06 -1.752461109688110974e-01 C1WG06 4.005884595055994590e-02 C2WG06 6.597469378913034532e-02 L1*G06 -2.736169875580296909e-02 L2*G06 3.422596762686257871e-02
See also GnssProcessing, GnssSimulateReceiver, GnssSignalBias2Matrix, GnssSignalBias2SinexBias.'},
'fileFormat_griddedData': { 'name': 'GriddedData', 'key': 'fileFormat_griddedData', 'description': 'List of arbitrarily distributed points defined by geographic coordinates and ellipsoidal height. Each point can also have an associated area (projected on the unit sphere with a total area of ). This file format supports multiple values per point (called data0 , data1 and so on). For regular gridded data and binary format ( *.dat ) a more efficient storage scheme is used. See also: GriddedDataCreate . groops griddedData version=20200123 1 2 6.378137000000000000e+06 6.356752314140356146e+06 72 # hasArea, data columns, ellipoid a, ellipoid b, data rows # longitude [deg] latitude [deg] height [m] unit areas [-] data0 data1 # =========================================================================================================================================================== -1.650000000000000000e+02 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 -1.350000000000000000e+02 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 -1.050000000000000142e+02 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 -7.500000000000001421e+01 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 -4.500000000000002132e+01 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 -1.500000000000002132e+01 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 1.499999999999997691e+01 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 4.499999999999997868e+01 7.500000000000000000e+01 
0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 7.499999999999997158e+01 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 1.049999999999999574e+02 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 1.349999999999999432e+02 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 1.649999999999999432e+02 7.500000000000000000e+01 0.000000000000000000e+00 7.014893453974438420e-02 1.000000000000000000e+00 2.000000000000000000e+00 -1.650000000000000000e+02 4.500000000000000711e+01 0.000000000000000000e+00 1.916504532594049681e-01 1.000000000000000000e+00 2.000000000000000000e+00 -1.350000000000000000e+02 4.500000000000000711e+01 0.000000000000000000e+00 1.916504532594049681e-01 1.000000000000000000e+00 2.000000000000000000e+00', 'config_table': '', 'display_text': 'List of arbitrarily distributed points defined by geographic coordinates and ellipsoidal height. Each point can also have an associated area (projected on the unit sphere with a total area of $4\\pi$). This file format supports multiple values per point (called data0, data1 and so on).
For regular gridded data and binary format (*.dat) a more efficient storage scheme is used.
'},
'fileFormat_griddedDataTimeSeries': { 'name': 'GriddedDataTimeSeries', 'key': 'fileFormat_griddedDataTimeSeries', 'description': 'Time series of data for arbitrarily distributed points defined by geographic coordinates and ellipsoidal height. The data can be temporally interpolated by basis splines . The file format consists of a griddedData , a time series, and for each spatial point and spline node pair multiple values called data0 , data1 , and so on. A GriddedDataTimeSeries can be generated from individual griddedData with the program GriddedData2GriddedDataTimeSeries . Vice-versa, a GriddedDataTimeSeries can be evaluated at a specific time stamp to obtain a griddedData with GriddedDataTimeSeries2GriddedData .', 'config_table': '', 'display_text': 'Time series of data for arbitrarily distributed points defined by geographic coordinates and ellipsoidal height. The data can be temporally interpolated by basis splines. The file format consists of a griddedData, a time series, and for each spatial point and spline node pair multiple values called data0, data1, and so on.
A GriddedDataTimeSeries can be generated from individual griddedData with the program GriddedData2GriddedDataTimeSeries. Vice-versa, a GriddedDataTimeSeries can be evaluated at a specific time stamp to obtain a griddedData with GriddedDataTimeSeries2GriddedData.'},
'fileFormat_instrument': { 'name': 'Instrument', 'key': 'fileFormat_instrument', 'description': 'This template file format can store different observations in a epoch wise manner. Each epoch consists of a time and additional data, e.g orbits, accelerometer data, star camera quaternions (see This template file format can store different observations in a epoch wise manner. Each epoch consists of a time and additional data, e.g orbits, accelerometer data, star camera quaternions (see ). The time series can be divided in several arcs (see InstrumentSynchronize ). Also a matrix file is allowed as one single arc. The first column must contain times [MJD]. Without any extra column the instrument type is INSTRUMENTTIME, with one additional column the type is MISCVALUE, and for more columns the type MISCVALUES is used. groops instrument version=20200123 # SATELLITETRACKING -9 60 # instrument type, number of arcs # Time [MJD] data0: range [m] data1: range-rate [m/s] data2: range-acc [m/s^2] # ===================================================================================================== 12 # number of epochs of 1. 
arc 54588.000000000000000000 -5.074649470097549492e+05 5.755440207134928654e-01 1.877605261528093308e-03 54588.000057870370255841 -5.074620458130163024e+05 5.849357691551860805e-01 1.878948916234051596e-03 54588.000115740740966430 -5.074590976427756250e+05 5.943331739937073310e-01 1.879937220634776869e-03 54588.000173611111222272 -5.074561024756557308e+05 6.037340169611068452e-01 1.880370529387525701e-03 54588.000231481481478113 -5.074530602992626373e+05 6.131368121270999172e-01 1.880680632122925426e-03 54588.000289351851733954 -5.074499711071007187e+05 6.225398878861636565e-01 1.880495369480403561e-03 54588.000347222222444543 -5.074468349029610981e+05 6.319414138081351773e-01 1.880073731783055927e-03 54588.000405092592700385 -5.074436516971451929e+05 6.413404243585696385e-01 1.879464843086203459e-03 54588.000462962962956226 -5.074404215058300761e+05 6.507353310092597320e-01 1.878578987216372124e-03 54588.000520833333212067 -5.074371443491023383e+05 6.601267978060636477e-01 1.877878184949659246e-03 54588.000578703703922656 -5.074338202460713219e+05 6.695136489207137442e-01 1.876962042758626532e-03 54588.000636574074178498 -5.074304492190054734e+05 6.788964444122400632e-01 1.876091925462087043e-03 12 # number of epochs of 2. 
arc 54588.000694444444434339 -5.074270312892858055e+05 6.882748400534359767e-01 1.875376456928801432e-03 54588.000752314814690180 -5.074235664742725785e+05 6.976508178537534910e-01 1.874929898412159559e-03 54588.000810185185400769 -5.074200547868391732e+05 7.070236200716006891e-01 1.874312324351668077e-03 54588.000868055555656611 -5.074164962409950094e+05 7.163943828291452487e-01 1.873924188388115340e-03 54588.000925925925912452 -5.074128908454515622e+05 7.257639682023964145e-01 1.874025826380292404e-03 54588.000983796296168293 -5.074092386012640782e+05 7.351333608427884636e-01 1.873680487441316657e-03 54588.001041666666878882 -5.074055395130896359e+05 7.445020815182646912e-01 1.873849502509668122e-03 54588.001099537037134724 -5.074017935789784533e+05 7.538716732272922050e-01 1.873971633320137753e-03 54588.001157407407390565 -5.073980007962241652e+05 7.632414098560330595e-01 1.873984767500571974e-03 54588.001215277777646406 -5.073941611626467784e+05 7.726123093411200182e-01 1.874295246964456478e-03 54588.001273148148356995 -5.073902746728868224e+05 7.819835205798950639e-01 1.874226146744964808e-03 54588.001331018518612836 -5.073863413272026228e+05 7.913547196412918927e-01 1.874173804634685515e-03', 'config_table': '', 'display_text': 'This template file format can store different observations in a epoch wise manner. Each epoch consists of a time and additional data, e.g orbits, accelerometer data, star camera quaternions (see InstrumentType). The time series can be divided in several arcs (see InstrumentSynchronize).
Also a matrix file is allowed as one single arc. The first column must contain times [MJD]. Without any extra column the instrument type is INSTRUMENTTIME, with one additional column the type is MISCVALUE, and for more columns the type MISCVALUES is used.
'},
'fileFormat_matrix': { 'name': 'Matrix', 'key': 'fileFormat_matrix', 'description': 'Stores matrices and vectors. Only one triangle is written for symmetric or triangular matrices. The header (the matrix definition) is optional. Therefore a pure text with only numbers in columns are also allowed. This simplifies the handling of external data. Instead of a matrix file also an instrument file is allowed. The first column is the time [MJD], the other columns depends on the instrument type. groops matrix version=20200123 LowerSymmetricMatrix( 4 x 4 ) 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00', 'config_table': '', 'display_text': 'Stores matrices and vectors. Only one triangle is written for symmetric or triangular matrices.
The header (the matrix definition) is optional. Therefore a pure text with only numbers in columns is also allowed. This simplifies the handling of external data.
Instead of a matrix file also an instrument file is allowed. The first column is the time [MJD], the other columns depend on the instrument type.
'},
'fileFormat_meanPolarMotion': { 'name': 'MeanPolarMotion', 'key': 'fileFormat_meanPolarMotion', 'description': 'The mean pole of the Earth rotation is represented by a polynomial in a time interval. <?xml version="1.0" encoding="UTF-8"?> <groops type="meanPolarMotion" version="20200123"> <meanPolarMotion> <intervalCount>2</intervalCount> <interval> <timeStart>42778.0000000000000000</timeStart> <degree>3</degree> <xp>5.59741000000000e-02</xp> <xp>1.82430000000000e-03</xp> <xp>1.84130000000000e-04</xp> <xp>7.02400000000000e-06</xp> <yp>3.46346000000000e-01</yp> <yp>1.78960000000000e-03</yp> <yp>-1.07290000000000e-04</yp> <yp>-9.08000000000000e-07</yp> </interval> <interval> <timeStart>55197.0000000000000000</timeStart> <degree>1</degree> <xp>2.35130000000000e-02</xp> <xp>7.61410000000000e-03</xp> <yp>3.58891000000000e-01</yp> <yp>-6.28700000000000e-04</yp> </interval> </meanPolarMotion> </groops>', 'config_table': '', 'display_text': 'The mean pole of the Earth rotation is represented by a polynomial in a time interval.
'},
'fileFormat_normalEquation': { 'name': 'NormalEquation', 'key': 'fileFormat_normalEquation', 'description': 'Stores a system of normal equations This file format consists of multiple files. The file name normals.dat.gz corresponds to the following files: normals.dat.gz or normals.00.00.dat.gz ... normals.0n.0n.dat.gz : the normal matrix as matrix , normals.rightHandSide.dat.gz : the right hand side(s) as matrix , normals.parameterNames.txt : parameter names , normals.info.xml : u.a. containing the number of observations and the quadratic sum of (reduced) observations . A large normal matrix may be splitted into blocks and stored in multiple files. The block row/column number is indicated in the file name. Only the upper blocks of the sysmmetric matrix are considered. Matrix in blocks can be distributed on muliple nodes in parallel mode to efficiently use distributed memory.', 'config_table': '', 'display_text': 'Stores a system of normal equations \\[ \\M N \\hat{\\M x} = \\M n. \\] This file format consists of multiple files. The file name normals.dat.gz corresponds to the following files:
normals.dat.gz or normals.00.00.dat.gz ... normals.0n.0n.dat.gz: the normal matrix $\\M N$ as matrix,
normals.rightHandSide.dat.gz: the right hand side(s) $\\M n$ as matrix,
normals.info.xml: u.a. containing the number of observations and the quadratic sum of (reduced) observations $\\M l^T\\M P\\M l$.
A large normal matrix may be split into blocks and stored in multiple files. The block row/column number is indicated in the file name. Only the upper blocks of the symmetric matrix are considered. Matrices in blocks can be distributed on multiple nodes in parallel mode to efficiently use distributed memory.'},
'fileFormat_oceanPoleTide': { 'name': 'OceanPoleTide', 'key': 'fileFormat_oceanPoleTide', 'description': 'Describes the reaction of the ocean mass to the change of the centrifugal potential (polar wobble) in terms of spherical harmonics. See also Iers2OceanPoleTide .', 'config_table': '', 'display_text': 'Describes the reaction of the ocean mass to the change of the centrifugal potential (polar wobble) in terms of spherical harmonics.
See also Iers2OceanPoleTide.'},
'fileFormat_parameterName': { 'name': 'ParameterName', 'key': 'fileFormat_parameterName', 'description': 'Name of parameters of a system of normal equations or solution vector . A parameter name is a string <object>:<type>:<temporal>:<interval> containg four parts divided by : object: Object this parameter refers to, e.g. graceA , G023 , earth , type: Type of this parameter, e.g. accBias , position.x , temporal: Temporal representation of this parameter, e.g. trend , polynomial.degree1 , interval: Interval/epoch this parameter represents, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00 , 2018-01-01_00-00-00 . In the documentation a star ( * ) in the name means this part is untouched and useally set by other classes. Times are written as yyyy-mm-dd_hh-mm-ss and intervals (if not empty) as <timeStart>_<timeEnd> . See ParameterNamesCreate . groops parameterName version=20200123 # object:type:temporal:interval # ============================= 10080 # number of parameters karr:position.x::2018-06-01_00-00-00_2018-06-02_00-00-00 karr:position.y::2018-06-01_00-00-00_2018-06-02_00-00-00 karr:position.z::2018-06-01_00-00-00_2018-06-02_00-00-00 karr:troposphereWet:spline.n1:2018-06-01_00-00-00_2018-06-01_02-00-00 karr:troposphereWet:spline.n1:2018-06-01_00-00-00_2018-06-01_04-00-00 karr:troposphereWet:spline.n1:2018-06-01_02-00-00_2018-06-01_06-00-00 karr:troposphereWet:spline.n1:2018-06-01_04-00-00_2018-06-01_08-00-00 karr:troposphereWet:spline.n1:2018-06-01_06-00-00_2018-06-01_10-00-00 karr:troposphereWet:spline.n1:2018-06-01_08-00-00_2018-06-01_12-00-00 karr:troposphereWet:spline.n1:2018-06-01_10-00-00_2018-06-01_14-00-00 karr:troposphereWet:spline.n1:2018-06-01_12-00-00_2018-06-01_16-00-00 karr:troposphereWet:spline.n1:2018-06-01_14-00-00_2018-06-01_18-00-00 karr:troposphereWet:spline.n1:2018-06-01_16-00-00_2018-06-01_20-00-00 karr:troposphereWet:spline.n1:2018-06-01_18-00-00_2018-06-01_22-00-00 karr:troposphereWet:spline.n1:2018-06-01_20-00-00_2018-06-02_00-00-00 
karr:troposphereWet:spline.n1:2018-06-01_22-00-00_2018-06-02_00-00-00 karr:troposphereGradient.x:spline.n1:2018-06-01_00-00-00_2018-06-02_00-00-00 karr:troposphereGradient.y:spline.n1:2018-06-01_00-00-00_2018-06-02_00-00-00 karr:troposphereGradient.x:spline.n1:2018-06-01_00-00-00_2018-06-02_00-00-00 karr:troposphereGradient.y:spline.n1:2018-06-01_00-00-00_2018-06-02_00-00-00 karr:signalBias01(+1.00L1CG**):: karr:signalBias02(+1.00L2WG**):: karr:signalBias03(+1.00L2XG**):: G01:solarRadiationPressure.ECOM.D0:: G01:solarRadiationPressure.ECOM.DC2:: G01:solarRadiationPressure.ECOM.DS2:: G01:solarRadiationPressure.ECOM.Y0:: G01:solarRadiationPressure.ECOM.B0:: G01:solarRadiationPressure.ECOM.BC1:: G01:solarRadiationPressure.ECOM.BS1:: G01:stochasticPulse.x::2018-06-01_12-00-00 G01:stochasticPulse.y::2018-06-01_12-00-00 G01:stochasticPulse.z::2018-06-01_12-00-00 G01:arc0.position0.x:: G01:arc0.position0.y:: G01:arc0.position0.z:: G01:arc0.velocity0.x:: G01:arc0.velocity0.y:: G01:arc0.velocity0.z:: G01:signalBias01(-1.00C1CG01):: G01:signalBias02(+1.00L1*G01):: G01:signalBias03(+1.00L2*G01)::', 'config_table': '', 'display_text': 'Name of parameters of a system of normal equations or solution vector.
A parameter name is a string <object>:<type>:<temporal>:<interval> containing four parts divided by :
object: Object this parameter refers to, e.g. graceA, G023, earth,
type: Type of this parameter, e.g. accBias, position.x,
temporal: Temporal representation of this parameter, e.g. trend, polynomial.degree1,
interval: Interval/epoch this parameter represents, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00, 2018-01-01_00-00-00.
In the documentation a star (*) in the name means this part is untouched and usually set by other classes. Times are written as yyyy-mm-dd_hh-mm-ss and intervals (if not empty) as <timeStart>_<timeEnd>.
'},
'fileFormat_platform': { 'name': 'Platform', 'key': 'fileFormat_platform', 'description': 'Defines a platform with a local coordinate frame equipped with instruments. The platform might be a reference station, a low Earth satellite, or a transmitting GNSS satellite and is referenced by a marker name and number. The reference point (marker or center of mass (CoM)) can change in time relative to the local frame. Each equipped instrument is described at least by the following information name serial number coordinates in the local frame a time interval in which the instrument was active the orientation for antennas and reflectors. For GNSS satellites the platform defines the PRN. The different assigned SVNs are defined by the transmitting antennas. Platforms for GNSS stations can be created from station log files with GnssStationLog2Platform . Platforms for GNSS satellites can be created from an ANTEX file with GnssAntex2AntennaDefinition . See also PlatformCreate .', 'config_table': '', 'display_text': 'Defines a platform with a local coordinate frame equipped with instruments. The platform might be a reference station, a low Earth satellite, or a transmitting GNSS satellite and is referenced by a marker name and number. The reference point (marker or center of mass (CoM)) can change in time relative to the local frame.
Each equipped instrument is described at least by the following information
name
serial number
coordinates in the local frame
a time interval in which the instrument was active
the orientation for antennas and reflectors.
For GNSS satellites the platform defines the PRN. The different assigned SVNs are defined by the transmitting antennas.
'},
'fileFormat_polygon': { 'name': 'Polygon', 'key': 'fileFormat_polygon', 'description': 'List of longitude and latitudes to describe borders, e.g. river basins or continents. It is used in List of longitude and latitudes to describe borders, e.g. river basins or continents. It is used in . groops polygon version=20200123 2 # number of polygons 6 # number of points (1. polygon) # longitude [deg] latitude [deg] # ================================================== -1.598200000000000216e+02 2.203000000000000114e+01 -1.596200000000000045e+02 2.189999999999999858e+01 -1.593799999999999955e+02 2.189999999999999858e+01 -1.593000000000000114e+02 2.221999999999999886e+01 -1.595800000000000125e+02 2.221999999999999886e+01 -1.598200000000000216e+02 2.203000000000000114e+01 5 # number of points (2. polygon) # longitude [deg] latitude [deg] # ================================================== -7.900000000000000000e+01 2.669999999999999929e+01 -7.870000000000000284e+01 2.650000000000000000e+01 -7.823000000000000398e+01 2.667000000000000171e+01 -7.793000000000000682e+01 2.667000000000000171e+01 -7.779999999999999716e+01 2.646999999999999886e+01', 'config_table': '', 'display_text': 'List of longitude and latitudes to describe borders, e.g. river basins or continents. It is used in border:polygon.
groops polygon version=20200123 2 # number of polygons 6 # number of points (1. polygon) # longitude [deg] latitude [deg] # ================================================== -1.598200000000000216e+02 2.203000000000000114e+01 -1.596200000000000045e+02 2.189999999999999858e+01 -1.593799999999999955e+02 2.189999999999999858e+01 -1.593000000000000114e+02 2.221999999999999886e+01 -1.595800000000000125e+02 2.221999999999999886e+01 -1.598200000000000216e+02 2.203000000000000114e+01 5 # number of points (2. polygon) # longitude [deg] latitude [deg] # ================================================== -7.900000000000000000e+01 2.669999999999999929e+01 -7.870000000000000284e+01 2.650000000000000000e+01 -7.823000000000000398e+01 2.667000000000000171e+01 -7.793000000000000682e+01 2.667000000000000171e+01 -7.779999999999999716e+01 2.646999999999999886e+01
'},
'fileFormat_potentialCoefficients': { 'name': 'PotentialCoefficients', 'key': 'fileFormat_potentialCoefficients', 'description': 'The standard .gfc format as defined by the ICGEM is used in the ASCII format. Only the static part is used and temporal variations (e.g. trend) are ignored. To write additional information and temporal variations use PotentialCoefficients2Icgem .', 'config_table': '', 'display_text': 'The standard .gfc format as defined by the ICGEM is used in the ASCII format. Only the static part is used and temporal variations (e.g. trend) are ignored. To write additional information and temporal variations use PotentialCoefficients2Icgem.'},
'fileFormat_satelliteModel': { 'name': 'SatelliteModel', 'key': 'fileFormat_satelliteModel', 'description': 'Properties of a satellite to model non-conservative forces (e.g. Properties of a satellite to model non-conservative forces (e.g. ). The file may contain surface properties, mass, drag coefficients, and antenna thrust values. See SatelliteModelCreate and SinexMetadata2SatelliteModel . <?xml version="1.0" encoding="UTF-8"?> <groops type="satelliteModel" version="20190429"> <satelliteCount>1</satelliteCount> <satellite> <satelliteName>GALILEO-2</satelliteName> <mass>7.00000000000000e+02</mass> <coefficientDrag>0.00000000000000e+00</coefficientDrag> <surfaceCount>15</surfaceCount> <surface> <type>0</type> <normal> <x>-1.00000000000000e+00</x> <y>0.00000000000000e+00</y> <z>0.00000000000000e+00</z> </normal> <area>4.40000000000000e-01</area> <reflexionVisible>0.00000000000000e+00</reflexionVisible> <diffusionVisible>7.00000000000000e-02</diffusionVisible> <absorptionVisible>9.30000000000000e-01</absorptionVisible> <reflexionInfrared>1.00000000000000e-01</reflexionInfrared> <diffusionInfrared>1.00000000000000e-01</diffusionInfrared> <absorptionInfrared>8.00000000000000e-01</absorptionInfrared> <hasThermalReemission>1</hasThermalReemission> </surface> ... <modulCount>2</modulCount> <modul> <type>1</type> <rotationAxis> <x>0.00000000000000e+00</x> <y>1.00000000000000e+00</y> <z>0.00000000000000e+00</z> </rotationAxis> <normal> <x>0.00000000000000e+00</x> <y>0.00000000000000e+00</y> <z>1.00000000000000e+00</z> </normal> <surface> <count>4</count> <cell>11</cell> <cell>12</cell> <cell>13</cell> <cell>14</cell> </surface> </modul> <modul> <type>2</type> <antennaThrust> <x>0.00000000000000e+00</x> <y>0.00000000000000e+00</y> <z>2.65000000000000e+02</z> </antennaThrust> </modul> </satellite> </groops>', 'config_table': '', 'display_text': 'Properties of a satellite to model non-conservative forces (e.g. miscAccelerations). 
The file may contain surface properties, mass, drag coefficients, and antenna thrust values.
'},
'fileFormat_stringList': { 'name': 'StringList', 'key': 'fileFormat_stringList', 'description': 'White space separated list of strings. Comments are allowed and all the text from the character \' # \' to the end of the line is ignored. Strings containing white spaces or the \' # \' character must be set in quotes (\' "" \'). # IGSR3 stations abmf abpo ade1 adis ajac albh algo alic alrt amc2 aoml areq arev artu asc1', 'config_table': '', 'display_text': 'White space separated list of strings. Comments are allowed and all the text from the character \'#\' to the end of the line is ignored. Strings containing white spaces or the \'#\' character must be set in quotes (\'""\').
'},
'fileFormat_stringTable': { 'name': 'StringTable', 'key': 'fileFormat_stringTable', 'description': 'White space separated table of strings in row and columns. Additional columns in a row may represent alternatives, e.g. for core stations in a GNSS network. Comments are allowed and all the text from the character \' # \' to the end of the line is ignored. Strings containing white spaces or the \' # \' character must be set in quotes (\' "" \'). # core network with alternative stations artu mdvj mdvo nril asc1 sthl bahr bhr1 yibl nama chat chti auck chpi braz ufpr savo ckis nium coco xmis dgar dgav cro1 scub abmf lmmf aoml daej taej suwn osn1 darw kat1 tow2 alic dav1 maw1 drao albh will holb nano fair whit glps guat gode godz usno usn3 goug', 'config_table': '', 'display_text': 'White space separated table of strings in row and columns. Additional columns in a row may represent alternatives, e.g. for core stations in a GNSS network. Comments are allowed and all the text from the character \'#\' to the end of the line is ignored. Strings containing white spaces or the \'#\' character must be set in quotes (\'""\').
'},
'fileFormat_timeSplinesCovariance': { 'name': 'TimeSplinesCovariance', 'key': 'fileFormat_timeSplinesCovariance', 'description': 'Stores covariance information for TimeSplinesGravityField . It can be the variances of the potential coefficients or the full covariance matrix for each temporal nodal point.', 'config_table': '', 'display_text': 'Stores covariance information for TimeSplinesGravityField. It can be the variances of the potential coefficients or the full covariance matrix for each temporal nodal point.'},
'fileFormat_timeSplinesGravityField': { 'name': 'TimeSplinesGravityField', 'key': 'fileFormat_timeSplinesGravityField', 'description': 'Temporal changing gravity field, parametrized as spherical harmonics in the spatial domain and parametrized as basis splines in the time domain (see basis splines ). It is evaluated with Temporal changing gravity field, parametrized as spherical harmonics in the spatial domain and parametrized as basis splines in the time domain (see . See also: Gravityfield2TimeSplines , PotentialCoefficients2BlockMeanTimeSplines .', 'config_table': '', 'display_text': 'Temporal changing gravity field, parametrized as spherical harmonics in the spatial domain and parametrized as basis splines in the time domain (see basis splines). It is evaluated with gravityfield:timeSplines.
See also: Gravityfield2TimeSplines, PotentialCoefficients2BlockMeanTimeSplines.'},
'fileFormat_variationalEquation': { 'name': 'VariationalEquation', 'key': 'fileFormat_variationalEquation', 'description': 'Arcs with reference orbit and state transition matrices. The file contains a reference orbit (position and velocity), the derivatives of the orbit with respect to the satellite state vector for each arc, transformations (rotations) between the satellite, celestial, and terrestrial frame and a satellite macro model (see SatelliteModel ). The reference orbit can be extracted with Variational2OrbitAndStarCamera . See also: PreprocessingVariationalEquation .', 'config_table': '', 'display_text': 'Arcs with reference orbit and state transition matrices.
The file contains a reference orbit (position and velocity), the derivatives of the orbit with respect to the satellite state vector for each arc, transformations (rotations) between the satellite, celestial, and terrestrial frame and a satellite macro model (see SatelliteModel).
This class realizes the transformation according to the IERS2010 conventions
given by the International Earth Rotation and Reference Systems Service (IERS).
A file with the earth orientation parameter is needed (inputfileEOP).
Name
Type
Annotation
inputfileEOP
filename
truncatedNutation
boolean
use truncated nutation model (IAU2006B)
Iers2010b
This class realizes the transformation according to the IERS2010 conventions
given by the International Earth Rotation and Reference Systems Service (IERS).
A file with the earth orientation parameter is needed (inputfileEOP).
Includes additional high-frequency EOP models (inputfileDoodsonEOP).
Name
Type
Annotation
inputfileEOP
filename
inputfileDoodsonEOP
filename
Iers2003
This class realizes the transformation according to IERS2003 conventions
given by the International Earth Rotation and Reference Systems Service (IERS).
A file with the earth orientation parameter is needed (inputfileEOP).
The transformation is realized as rotation about the z-axis.
The angle is given by the Earth Rotation Angle (ERA) as
const Time T = timeUT1-mjd2time(J2000);
ERA = fmod(2*PI*(0.7790572732640 + T.mjdMod() + 0.00273781191135448*T.mjd()), 2*PI);
Z-Axis
The transformation is realized as rotation about the z-axis.
You must specify the angle (initialAngle) at time0 and
the angular velocity (angularVelocity).
Name
Type
Annotation
initialAngle
double
Angle at time0 [rad]
angularVelocity
double
[rad/s]
time0
time
StarCamera
This class reads quaternions from an instrument file and interpolates to the given time stamp.
Name
Type
Annotation
inputfileStarCamera
filename
interpolationDegree
uint
degree of interpolation polynomial
MoonRotation
This class realizes the transformation between the moon-fixed system
(Principal Axis System (PA) or Mean Earth System (ME))
and the ICRS according to the JPL ephemeris file.
Shadowing of satellites by moon and Earth provided as factor
between $[0,1]$ with 0: full shadow and 1: full sun light.
Conical
Figure: Modelling umbra and penumbra.
SOLAARS
Earth’s penumbra modeling with Solar radiation pressure with
Oblateness and Lower Atmospheric Absorption, Refraction, and Scattering (SOLAARS).
See Robertson, Robbie. (2015),
Highly Physical Solar Radiation Pressure Modeling During Penumbra Transitions (pp. 67-75).
With the InstrumentSynchronize an instrument file can
be divided into time intervals and within the intervals into arcs.
This file provides the information about the mapping of arcs to time intervals.
This file can be used for the variational equation approach or KalmanBuildNormals.
Ocean tides are represented as time variable gravitational potential
and is given by a fourier expansion
\[
V(\M x,t) = \sum_{f} V_f^c(\M x)\cos(\Theta_f(t)) + V_f^s(\M x)\sin(\Theta_f(t)),
\]where $V_f^c(\M x)$ and $V_f^s(\M x)$ are spherical harmonics.
The $\Theta_f(t)$ are the arguments of the tide constituents $f$:
\[
\Theta_f(t) = \sum_{i=1}^6 n_f^i\beta_i(t),
\]where $\beta_i(t)$ are the Doodson's fundamental arguments ($\tau,s,h,p,N',p_s$) and $n_f^i$
are the Doodson multipliers for the term at frequency $f$.
Containing the Love numbers together with frequency corrections to compute
the gravitational potential and the geometric displacements due to solid Earth tides.
It is used by tides.
Contains a list of GNSS antennas which are identified by its
name (type), serial, and radome. Each antenna consists of
antenna center offsets (ACO) and antenna center variations (ACV)
for different signal types (code and phase).
The ACV values for each type are stored in an elevation and azimuth dependent grid.
Contains a list of GNSS receivers which are identified by its
name, serial, and version. Defines for each receiver a list of
signal types which can be observed.
Can also be used for GNSS transmitters to define a list of
transmitted signal types. For GLONASS satellites the frequency
number can be stored in the version field.
List of arbitrarily distributed points defined by geographic coordinates and ellipsoidal
height. Each point can also have an associated area
(projected on the unit sphere with a total area of $4\pi$).
This file format supports multiple values per point (called data0, data1 and so on).
For regular gridded data and binary format (*.dat) a more efficient storage scheme is used.
Time series of data for arbitrarily distributed points defined by geographic coordinates and ellipsoidal
height. The data can be temporally interpolated by basis splines.
The file format consists of a griddedData, a time series, and
for each spatial point and spline node pair multiple values called data0, data1, … .
This template file format can store different observations in an epoch-wise manner. Each epoch consists of a time and
additional data, e.g. orbits, accelerometer data, star camera quaternions (see InstrumentType).
The time series can be divided in several arcs (see InstrumentSynchronize).
Also a matrix file is allowed as one single arc. The first column must contain times [MJD]. Without any extra column
the instrument type is INSTRUMENTTIME, with one additional column the type is MISCVALUE, and for more columns the type
MISCVALUES is used.
Stores matrices and vectors. Only one triangle is written for symmetric or triangular matrices.
The header (the matrix definition) is optional.
Therefore a pure text with only numbers in columns is also allowed.
This simplifies the handling of external data.
Instead of a matrix file also an instrument file is allowed.
The first column is the time [MJD], the other columns depend on the instrument type.
Stores a system of normal equations
\[
\M N \hat{\M x} = \M n.
\]
This file format consists of multiple files.
The file name normals.dat.gz corresponds to the following files:
normals.dat.gz or normals.00.00.dat.gz ... normals.0n.0n.dat.gz:
the normal matrix $\M N$ as matrix,
normals.rightHandSide.dat.gz:
the right hand side(s) $\M n$ as matrix,
normals.info.xml:
u.a. containing the number of observations and the quadratic sum of (reduced) observations $\M l^T\M P\M l$.
A large normal matrix may be split into blocks and stored in multiple files.
The block row/column number is indicated in the file name.
Only the upper blocks of the symmetric matrix are considered.
Matrices in blocks can be distributed on multiple nodes in parallel mode to efficiently use distributed memory.
A parameter name is a string <object>:<type>:<temporal>:<interval> containing four parts divided by :
object: Object this parameter refers to, e.g. graceA, G023, earth, …
type: Type of this parameter, e.g. accBias, position.x, …
temporal: Temporal representation of this parameter, e.g. trend, polynomial.degree1, …
interval: Interval/epoch this parameter represents, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00, 2018-01-01_00-00-00.
In the documentation a star (*) in the name means this part is untouched and usually set by other classes.
Times are written as yyyy-mm-dd_hh-mm-ss and intervals (if not empty) as <timeStart>_<timeEnd>.
Defines a platform with a local coordinate frame equipped with instruments.
The platform might be a reference station, a low Earth satellite,
or a transmitting GNSS satellite and is referenced by a marker name and number.
The reference point (marker or center of mass (CoM)) can change in time
relative to the local frame.
Each equipped instrument is described at least by the following information
name
serial number
coordinates in the local frame
a time interval in which the instrument was active
the orientation for antennas and reflectors.
For GNSS satellites the platform defines the PRN. The different assigned SVNs
are defined by the transmitting antennas.
The standard .gfc format as defined by the ICGEM is used in the ASCII format.
Only the static part is used and temporal variations (e.g. trend) are ignored.
To write additional information and temporal variations use PotentialCoefficients2Icgem.
Properties of a satellite to model non-conservative forces (e.g. miscAccelerations).
The file may contain surface properties, mass, drag coefficients, and antenna thrust values.
White space separated list of strings.
Comments are allowed and all the text from the character '#' to the end of the line is ignored.
Strings containing white spaces or the '#' character must be set in quotes ('""').
White space separated table of strings in row and columns.
Additional columns in a row may represent alternatives, e.g. for core stations in a GNSS network.
Comments are allowed and all the text from the character '#' to the end of the line is ignored.
Strings containing white spaces or the '#' character must be set in quotes ('""').
Stores covariance information for TimeSplinesGravityField.
It can be the variances of the potential coefficients or the full covariance matrix for each
temporal nodal point.
Temporal changing gravity field, parametrized as spherical harmonics in the spatial domain and
parametrized as basis splines in the time domain (see basis splines).
It is evaluated with gravityfield:timeSplines.
Arcs with reference orbit and state transition matrices.
The file contains a reference orbit (position and velocity),
the derivatives of the orbit with respect to the satellite state vector for each arc,
transformations (rotations) between the satellite, celestial, and terrestrial frame
and a satellite macro model (see SatelliteModel).
================================================
FILE: docs/html/fundamentals.autoregressiveModel.html
================================================
GROOPS - Autoregressive model
Autoregressive Models
A multivariate (or vector) autoregressive model is one possible representation of a random process.
It specifies, that the output at epoch $t$ depends on the $p$ previous epochs, where $p$ is denoted process order,
plus a stochastic term.
In the following, finite order vector autoregressive - VAR($p$) in short - models as implemented in GROOPS will be described.
Definition
A finite order VAR($p$) model is defined as
\[
\mathbf{y}_e(t_i) = \sum_{k=1}^p \mathbf{\Phi}^{(p)}_k\mathbf{y}_e(t_{i-k}) + \mathbf{w}(t_i),
\hspace{5pt} \mathbf{w}(t_i) \sim \mathcal{N}(0, \mathbf{\Sigma}^{(p)}_\mathbf{w}),
\]where $\mathbf{y}_e(t_i)$ are realizations of a random vector process
Subtracting the right hand side and substituting the stochastic term $-\mathbf{w}(t_i)$ with the residual $\mathbf{v}(t_i)$ gives us
\[
\mathbf{0} = \mathbf{y}_e(t_i) - \sum_{k=1}^p \mathbf{\Phi}^{(p)}_k\mathbf{y}_e(t_{i-k}) + \mathbf{v}(t_i)
\]which can be used as pseudo-observation equations in the determination of the parameters $\mathbf{y}_e(t_i)$.
In matrix notation this reads
\[
0 =
\begin{bmatrix}
\mathbf{I} & -\mathbf{\Phi}^{(p)}_1 & \cdots & -\mathbf{\Phi}^{(p)}_p \\
\end{bmatrix}
\begin{bmatrix}
\mathbf{y}_e(t_i) \\
\mathbf{y}_e(t_{i-1}) \\
\vdots \\
\mathbf{y}_e(t_{i-p}) \\
\end{bmatrix}
+ \mathbf{v}(t_i).
\]After rearranging the vectors $\mathbf{y}_e(t)$ to have ascending time stamps
\[
0 =
\begin{bmatrix}
-\mathbf{\Phi}^{(p)}_p & \cdots & -\mathbf{\Phi}^{(p)}_1 & \mathbf{I} \\
\end{bmatrix}
\begin{bmatrix}
\mathbf{y}_e(t_{i-p}) \\
\vdots \\
\mathbf{y}_e(t_{i-1}) \\
\mathbf{y}_e(t_i) \\
\end{bmatrix}
+ \mathbf{v}(t_i)
\]For practical purposes, the residuals above are further decorrelated using the
inverse square root of the white noise covariance matrix, leading to
\[
\bar{\mathbf{v}}(t_i) = \underbrace{\mathbf{\Sigma}^{(p)^{-\frac{1}{2}}}_\mathbf{w}}_{=\mathbf{W}}\mathbf{v}(t_i), \hspace{25pt} \bar{\mathbf{v}}(t_i) \sim \mathcal{N}(0, \mathbf{I}).
\]The square root used is in principle arbitrary, but should satisfy $\mathbf{W}^T\mathbf{W} = \mathbf{\Sigma}^{(p)^{-1}}_\mathbf{w}$.
This means that both eigendecomposition based roots and Cholesky factors can be used.
After applying the matrix from the left, we arrive at the observation equations
\[
0 =
\begin{bmatrix}
-\mathbf{W}\mathbf{\Phi}^{(p)}_p & \cdots & -\mathbf{W}\mathbf{\Phi}^{(p)}_1 & \mathbf{W} \\
\end{bmatrix}
\begin{bmatrix}
\mathbf{y}_e(t_{i-p}) \\
\vdots \\
\mathbf{y}_e(t_{i-1}) \\
\mathbf{y}_e(t_i) \\
\end{bmatrix}
+ \bar{\mathbf{v}}(t_i)
\]which yields fully decorrelated residuals.
Currently, VAR($p$) models are saved to a single file which contains this matrix.
A time variable function is given by
\[
f(x,t) = \sum_i f_i(x)\Psi_i(t),
\]with the (spatial) coefficients $f_i(x)$ as parameters and the temporal basis functions $\Psi_i(t)$.
Basis splines are defined as polynomials of degree $n$ in intervals between nodal points in time $t_i$:
where $\tau$ is the normalized time in each time interval
\[
\tau_i = \frac{t-t_i}{t_{i+1}-t_i}.
\]The total number of coefficients $f_i(x)$ is $N=N_t+n-1$,
where $N_t$ is the count of nodal time points $t_i$ and $n$ is the degree.
Figure: Basis splines for different degrees with nodal points every 6 hours.
================================================
FILE: docs/html/fundamentals.robustLeastSquares.html
================================================
GROOPS - Robust least squares adjustment
Robust least squares adjustment
The robust least squares adjustment used in GROOPS is based on a modified Huber
estimator. It iteratively downweights observations with large outliers.
The algorithm starts with a first solution with equal weights $\M P =\M I$
\[
\hat{\M x} = (\M A^T\M P\M A)^{-1}\M A^T\M P\M l.
\]The solution is used to compute the residuals
\[
\hat{e}_i = \left(\M l - \M A \hat{\M x}\right)_i
\]and the redundancies of all observations
\[
r_i = \left(\M I - \M A(\M A^T\M P\M A)^{-1}\M A^T\M P\right)_{ii}.
\]For observations with large residuals a new standard deviation is assigned
\[
\sigma_i =
\begin{cases}
1 & \text{for } \left|\frac{\hat{e}_i}{r_i}\right| \le h\cdot\hat{\sigma} \\
\left|\frac{\hat{e}_i}{r_ih}\right|^p & \text{for } \left|\frac{\hat{e}_i}{r_i}\right| > h\cdot\hat{\sigma},
\end{cases}
\]where $h$ is huber, $p$ is huberPower, and $\hat{\sigma}^2$ a robust overall variance factor
computed from all residuals.
The estimation is repeated huberMaxIteration times with a new weight matrix
\[
\M P = \text{diag}\left(\frac{1}{\sigma_1^2}, \frac{1}{\sigma_2^2}, \ldots, \frac{1}{\sigma_n^2}\right)
\] or until convergence is reached.
GROOPS is controlled by XML configuration files. One or more configuration files
must be passed as arguments to GROOPS:
groops config1.xml config2.xml [...]
These files can be created with the graphical user interface program groopsGui
in a convenient way (see section GUI).
A complete formal (computer readable) description of a configuration file
in the form of an XSD schema file can be created with the command
groops --xsd groops.xsd
A configuration file consists of a list of programs
that are executed in sequential order. Each program comes with its own config options
and they work independently without any internal communication between programs.
Data flow between programs is realized via files. An outputfile of one program can serve as
an inputfile for the next program.
Most programs are deliberately kept small and focused on a specific task. This modularity combined with the
general purpose design of many programs enables the creation of complex workflows with little effort.
Including loops and conditions in a config file provides even more flexibility.
Individual programs (and also other optional config elements) can be disabled
and are ignored during execution. Mandatory config elements are indicated by a star (*).
Empty optional elements are ignored or a meaningful default value is assumed.
The elements of a configuration file can be one of the following basic data types:
int: integer number
uint: unsigned integer number
double: floating point number
angle: given in degree
time: given in modified Julian date (MJD)
boolean: 0: false, 1: true
string: text
filename: absolute path to a file or path relative to the working directory
expression: numerical expression evaluated during execution
doodson: Doodson number or Darwin's name of a tidal frequency
gnssType: GNSS observation type according to the RINEX 3 definition
The first 5 data types also allow numerical expressions as input in addition to pure numbers.
In addition to these basic types, there are a large number of complex data types called classes,
which are described in section Classes.
Variables
In addition to programs, a config file can also include elements called variables.
These elements are comparable to read-only variables in programming and can be referenced from any program and config element.
This can be done by either linking an element directly to a variable or
by using the name as a variable in an expression of an input field (see section Parsers and variables).
While elements can only be directly linked to variables of the same type, this also supports complex data types
such as gravityfield. Thus it is possible to, for example, define a reference gravity field once
in the global section and use it multiple times in different programs.
Variables can be declared anywhere in the configuration file. Variables in locations other than the global section
have a local scope and hide global variables or variables from a hierarchy level above. They are valid after
declaration until the end of the hierarchy level is reached or a new variable with the same name is declared.
Variables are not evaluated directly when they are declared, but only later when they are used in a config element.
This means, for example, that a variable satelliteFile with data/swarm_orbit_{loopTime:%D}.dat
can be declared in the global section without the variable loopTime having to be known at this time.
One special variable is groopsDataDir, which is used as a variable in most default
file paths throughout many GROOPS programs. Since this variable is going to be needed in
most config files, it is recommended to define it in a template file that is used when creating
new config files in the GUI. See section Graphical User Interface (GUI)
for details on how to set up a template file.
In addition, the variables groopsConfigFile and workingDir are set automatically.
Global variables can be manipulated when running a config file by passing the argument --global <name>=<value>.
For example, running the command
runs the config file config.xml but replaces the values of the global variable timeStart
and satellite with 58849 and swarm, respectively. If a global variable passed as
an argument does not already exist in the config file, it will be added with the type string.
Only the basic data types listed above are supported. This feature can be useful when running GROOPS
from the command line or from an external script file.
================================================
FILE: docs/html/general.constants.html
================================================
GROOPS - Constants and the setting file
Constants and the settings file
GROOPS uses some built-in constants like DEFAULT_GM or the definition
of leap seconds, which are defined in source/base/constants.cpp.
A complete list of the constants can be written to an XML file with:
groops --write-settings <groopsDefaults.xml>
The built-in constants can be overwritten by a groopsDefaults.xml file
in the working directory or by explicitly passing the file as an argument at execution:
All GROOPS files are written either in XML, JSON, binary, or ASCII format depending on the filename extension.
.xml: XML format
.json: JSON format
.dat: binary format
.txt and all other extensions: ASCII format
With an additional extension of '.gz' files are directly compressed and uncompressed. It is also possible to directly uncompress and read (but not write) Unix compress'd files ('.Z').
Comments are allowed in ASCII files and all the text starting from the character '#' to the end of the line is ignored.
The program FileConvert can be used to convert between the different formats. This program is also useful to get
some general information of files in binary format.
The following special file types are used in GROOPS:
================================================
FILE: docs/html/general.gui.html
================================================
GROOPS - Graphical User Interface (GUI)
Graphical User Interface (GUI)
The graphical user interface program groopsGui enables the convenient creation of GROOPS config files.
It uses the Qt5 framework for cross-platform support.
Figure: Overview of the GUI (components mentioned below marked in red)
Settings and first-time setup
The GUI depends on an XSD schema file containing the complete formal (computer readable) description of a GROOPS config file.
This schema file can be created with the command:
groops --xsd <groopsDir>/groops.xsd
At least one schema file has to be set via
the menu Settings - Default Paths and Files. Setting more than one schema file enables the schema selector in the toolbar.
The selected schema will be used when (re-)opening or creating a config file.
This feature is useful when working with different versions of GROOPS at the same time.
It is possible to set a template file via the menu Settings - Default Paths and Files. This can be any GROOPS config file.
Whenever a new config file is created via the GUI, all global elements and programs defined in the template file are automatically created in the new config file.
It is highly recommended to create a template file containing at least the global element groopsDataDir of type filename.
This element is used as a variable in most default file paths throughout many GROOPS programs.
Thus, setting the path to the base directory containing all GROOPS data once in the template file, for example as
groopsDataDir=/home/<user>/groops/data, is the most convenient way to handle default paths in GROOPS.
The template file can also contain other often-used global elements, for example tmpDir or timeStart and timeEnd.
A working directory can be set via Settings - Default Paths and Files.
This directory is used as the default directory in the save dialog of new config files.
The GUI offers the option to open the GROOPS documentation for a selected program. To use this feature,
the GROOPS documentation must be generated (if not already present) with the command:
groops --doc <groopsDir>/docs/
In the menu Settings - Default Paths and Files the path to the HTML version of the documentation must be set (i.e. <groopsDir>/docs/html).
Selecting any program and pressing F1 opens the documentation for this program in an external browser.
Pressing F1 without having any program selected opens the main page of the GROOPS documentation.
Executing a config file from the GUI requires the setup of a run command in the menu Settings - Commands.
It is recommended for this command to open a new terminal in which GROOPS is executed with the config file given as an argument.
The placeholders %w and %f are replaced by the directory and file name of the selected config file, respectively.
Multiple commands can be set up, with the option to choose one of them in the run dialog.
Linux (GNOME): gnome-terminal --working-directory=%w -x bash -ic "groops %f; bash"
Windows, MPI with 4 processes: cd /d %w && mpiexec -n 4 groopsMPI.exe %f
Linux (KDE), MPI with 4 processes: konsole --workdir %w -e bash -ic "mpiexec -n 4 groopsMPI %f; bash"
Linux (GNOME), MPI with 4 processes: gnome-terminal --working-directory=%w -x bash -ic "mpiexec -n 4 groopsMPI %f; bash"
Basic features
Most basic features used to manipulate a config element are accessible via the context menu,
for example attributing loops and conditions or setting an element global.
Global elements automatically appear in the dropdown value list of config elements of the same type.
Selecting a global element from the dropdown list as a value links this config element to the global element.
In case the global element is removed, all linked elements' values are replaced by the value of the deleted global element.
The sidebar features three widgets:
Open Files: An overview of all open config files (select to change current tree)
Program List: A list of all programs defined in the schema of the active tree (filterable, supports drag and drop to tree, double click appends program)
Undo Stack: Tracks all changes in a config file (select to change state of tree)
In case the names of programs or config elements change over time, the GUI offers a rename feature to update outdated config files.
The changes must be documented in the schema using GROOPS' rename feature. Affected elements will be marked with an icon and
the context menu item Update name will be available to change the element to the new name defined in the schema.
Additional keyboard shortcuts
Tree navigation:
Enter: Switch focus from tree to input field of selected row
Escape: Switch focus from input field back to tree
Tab: Next sibling element (or next sibling of parent if there is no next sibling, or next child otherwise)
Shift+Tab: Previous sibling element (or parent if there is no previous sibling)
Ctrl+Tab: Next tab/tree
Ctrl+Shift+Tab: Previous tab/tree
Ctrl+Space: Interact with the element (e.g. filename/program: open dialog; time: switch focus between input fields)
Ctrl+Up/Down: Next/previous sibling element
Ctrl+Left/Right: Fold/expand (complex) element
Tree manipulation:
Ctrl+Shift+Up/Down: Move unbounded list element (e.g. program, layer) up/down
Drag and Drop of tabs to other programs (i.e. text editors) or other GUI windows:
Drag: Copy tab (= keep in source window)
Shift+Drag: Move tab (= remove from source window)
Drag and Drop GROOPS config file(s) into GUI:
Drag: Open file(s) in new tab(s)
Shift+Drag: Open file in current tab (replaces current tab, only works with a single file)
================================================
FILE: docs/html/general.loopsAndConditions.html
================================================
GROOPS - Loops and conditions
Loops and conditions
The program flow within a config file can be controlled by the classes loop
and condition. The easiest way to access these classes is with the programs
LoopPrograms and IfPrograms.
The programs defined in IfPrograms are only executed if the defined
condition is met. A typical example is to check whether a file that
should have been created in previous programs actually exists. Further options are string comparisons and
checking the result of a numerical expression or the return value of an external command.
With LoopPrograms it is possible to repeat the programs defined inside within a loop.
The class loop creates a sequence to loop over and defines variables
that contain the index and element for the current iteration.
The loop and condition can also be attributed to single
config elements (including programs). Config elements with an assigned loop are repeated, with the loop variables
being evaluated for each element. If a condition is attributed to a config element
in addition to a loop, each element within the loop is only created if the condition is met. Conditions can also
be attributed to optional elements without an associated loop.
If the condition is not met, the optional element will be treated as if it was not provided.
Example: A program needs all files in a download directory as input.
All the inputfiles can be selected manually of course, but it is much easier to assign
a loop variable with inputfile={loopFile} and attribute a
loop:directoryListing.
The loop lists the content of the download directory and assigns each file name to the
loop variable via variableLoopFile=loopFile.
If GROOPS is compiled with the
Message Passing Interface (MPI),
most GROOPS programs can be run in parallel on multiple processor cores.
Processing on computer clusters with distributed memory is also supported.
Many loops are parallelized by computing each loop step at a different core.
Usually the first node distributes the work load, assigns loop steps to different cores,
and is not participating on the actual loop computation. This means running
GROOPS with only two nodes has no advantages in almost all cases.
Non-parallel parts and programs without parallel support
are executed at the first node only.
Large systems of normal equations, which are divided into blocks,
are distributed over the nodes to reduce the memory consumption on each single node.
As all nodes may read and write files (at least reading the config files)
the required part of the file system must be available on all participating computers.
The XML configuration file is evaluated by two parsers. In a first step a text parser is applied.
In the second step mathematical expressions are resolved to a number.
Variables (see section variables) can be referenced via their
name directly for the expression parser or in the form {name} for the text parser.
Mathematical expression parser
In all input fields that accept numbers (int, uint, double, angle, time) numerical
expressions are also allowed. Declared variables can be accessed via their name. The following
operations and functions are defined:
Constants: pi(), rho()=180/pi(), nan(), c(): light velocity,
G(): gravitational constant, GM(): gravitational constant of the Earth, R(): reference radius of the Earth
Mathematical: +, -, *, /, ^
Comparison: ==, !=, <, <=, >, >=, result is 1 or 0
Logical: not !, and &&, or ||, isnan(x), result is 1 or 0
Functions with 2 arguments: atan2(y,x), min(x,y), max(x,y), mod(x,y)
Time functions: now(): local time in MJD, date2mjd(year, month, day), gps2utc(mjd), utc2gps(mjd), dayofyear(mjd), decimalyear(mjd)
Condition: if(c,x,y): If the first argument is true (not 0), the second argument is evaluated, otherwise the third.
Text parser
Before the mathematical expression parser evaluates the expression, a simple text parser is applied.
The text parser is used for all input fields (also file names). It scans the text for terms like
{variable} and replaces it by the text content of the variable.
A literal '{' character must be escaped with '#{'.
The text parser allows regex replacements in the form {text/regex/replace}.
All matches of regex in the text are replaced by replace.
Possible {variables} in the three parts are evaluated beforehand.
Capturing groups () can be accessed by $1, $2,
… in the replacement ($0 is the complete match). Additional escape sequences are:
\l lowercase next char,
\u uppercase next char,
\L lowercase until \E,
\U uppercase until \E,
\Q quote (disable) pattern metacharacters until \E,
\E end either case modification or quoted section.
Examples:
{{variable}/test/text} replaces all occurrences of test by text.
{TEXT/.+/\L$0} converts text to lower case.
{012345/.#{2}(.#{3}).*/$1} extracts the substring at index 2 and length 3 resulting in 234.
Note the escaping #{.
The text parser also evaluates terms in the form {expression:format} and replaces it by a formatted
output. In order not to get confused with the regex replacements, the '/' character must be escaped
with '#/' in the expression. The format contains the text to be written as output.
It can contain embedded format specifiers that are replaced by the value of the expression and formatted
as requested (also multiple times). In the following, the resulting formatted output is given in the
brackets for an expression with the example value of 57493.8:
%i: Integer [57494]
%f: Decimal floating point [57493.800000]
%e: Scientific notation [5.749380e+04]
%g: Use the shortest representation: %e or %f [57493.8]
%c: Interpret number as ASCII character
%%: Write a single literal % character
The following specifiers interpret the value of the expression as MJD (modified Julian date):
%y: Four digit year [2016]
%Y: Two digit year [16]
%m: Month [04]
%d: Day of month [15]
%H: Hour [19]
%M: Minute [12]
%S: Second [00]
%D: Date (same as %y-%m-%d) [2016-04-15]
%T: Time (same as %H-%M-%S) [19-12-00]
%W: GPS week [1892]
%w: Day of GPS week (0..6) [5]
%O: Day of year (1..366)
The format can be specified further with %[width][.precision]specifier,
where [width] is the minimum number of characters to be printed.
If the value to be printed is shorter than this number, the result is padded with blank spaces
(or zeros if [width] starts with a zero).
The [.precision] defines the number of digits after the period (for %g the number of
significant digits instead).
Example:
Two variables time=57493+19/24+12/1440 and satellite=swarm are
set in the global section. The inputfile=data/{time:%y}/{satellite}_{time:%D}.dat
is expanded to "data/2016/swarm_2016-04-15.dat".
Example:
The variable x=3+5 is set in the global section.
The expression number=2*x is evaluated by the expression parser to =16.
In contrast if we use brackets like in number=2*{x} the expression is first evaluated
by the text parser to "2*3+5" and the expression parser now gives the result =11.
Variables for data
Some programs (e.g. FunctionsCalculate, InstrumentArcCalculate,
GriddedDataCalculate, or the plot programs)
read data (matrix) or gridded data
and evaluate input/output expressions for each data row.
For these kind of expressions additional variables are automatically defined for each data column
(X stands for the data column number: $0\ldots n$):
index: the row number, starting with zero
dataX: the value itself
dataXcount: number of rows
dataXmin
dataXmax
dataXsum
dataXmean
dataXrms: root mean square
dataXstd: standard deviation
dataXmedian
dataXmad: median absolute deviation
dataXstep: the minimal difference between two neighboring data points in the column
For gridded data input the following variables are additionally defined for each data point:
Select all or the first antenna from an antenna definition file
which matches the wildcards.
Name
Type
Annotation
inputfileAntennaDefinition
filename
name
string
serial
string
radome
string
onlyFirstMatch
boolean
otherwise all matching antennas are included
FromStationInfo
Select all antennas from an antenna definition file
which are used by a station within a defined time interval.
With specializeAntenna an individual antenna is created for each different serial number
using the general type specific values from file.
Name
Type
Annotation
inputfileStationInfo
filename
inputfileAntennaDefinition
filename
timeStart
time
only antennas used in this time interval
timeEnd
time
only antennas used in this time interval
specializeAntenna
boolean
e.g. separate different serial numbers from stationInfo
Resample
The azimuth and elevation dependent antenna center variations (patterns) of all antennas
are resampled to a new resolution.
This class can be used to separate general antenna patterns for different gnssTypes.
If the antennas contain only one pattern for all GPS observations on the L1 frequency (*1*G**),
the patternTypes=C1*G** and L1*G** create two patterns with the *1*G** pattern as template.
The first matching pattern in the antenna is used as template.
Also new additionalPattern can be added (e.g. for *5*G**).
With addExistingPatterns all already existing patterns that don't match completely to any of the above are added.
The antenna offset and antenna variations (patterns) are inseparable parts of the
antenna model. With removeOffset an estimated offset is removed from
all selected patterns and added to the offset. With removeMean an estimated
constant is removed additionally as it cannot be separated from signal biases.
The mean and offset are defined as discretized (deltaAzimuth,
dZenith) integral of the spherical cap from zenith down to maxZenith.
This class defines the models and parameters of the linearized observation equations
for all phase and code measurements (see GnssProcessing)
\[\label{gnssParametrizationType:model}
\M l - \M f(\M x_0) = \left.\frac{\partial \M f(\M x)}{\partial \M x}\right|_{\M x_0} \Delta\M x + \M\epsilon,
\]where the left side is the observation vector minus the effects computed from the a priori models.
After each least squares adjustment
(see GnssProcessing:processingStep:estimate)
the a priori parameters are updated
\[\label{gnssParametrizationType:update}
\M x_0 := \M x_0 + \Delta\hat{\M x}.
\]The vector $\M x_0$ can be written with
GnssProcessing:processingStep:writeAprioriSolution.
Any outputfiles defined in the parametrizations are written with
GnssProcessing:processingStep:writeResults.
Each parametrization (and possible constraint equations) has a name which enables
activating/deactivating the estimation of subsets of $\Delta\M x$ with
GnssProcessing:processingStep:selectParametrizations.
The a priori model $\M f(\M x_0)$ is unaffected and is always reduced.
The model for the different observation types can be described as
\[\label{gnssParametrizationType:gnssFullModel}
\begin{split}
f[\tau\nu a]_r^s(\M x) &= \text{geometry}(\M r_r^s) + \text{clock}^s(t) + \text{clock}_r(t) \\
&+ \text{ionosphere}([\tau\nu],t,\M r_r^s) + \text{troposphere}(t,\M r_r^s) \\
&+ \text{antenna}[\tau\nu a]^s + \text{antenna}[\tau\nu a]_r \\
&+ \text{bias}[\tau\nu a]^s + \text{bias}[\tau\nu a]_r
+ \lambda[L\nu] N[L\nu a]_r^s + \text{other}(\ldots) + \epsilon[\tau\nu a]_r^s
\end{split}
\]The notation $[\tau\nu a]_r^s$ describes the
attribution to a signal type $\tau$ (i.e., C or L), frequency $\nu$,
signal attribute $a$ (e.g., C, W, Q, X), transmitting satellite $s$, and observing receiver $r$.
It follows the RINEX 3 definition,
see GnssType.
The influence of the ionosphere is modelled by a STEC parameter (slant total electron content)
in terms of $[TECU]$ between each transmitter and receiver at each epoch. These parameters are pre-eliminated
from the observation equations before accumulating the normal equations.
This is similar to using the ionosphere-free linear combination as observations
but only one STEC parameter is needed for an arbitrary number of observation types.
The influence on the code and phase observation is modeled as
\[\label{gnssParametrizationType:IonosphereSTEC:STEC}
\begin{split}
\text{ionosphere}([C\nu], STEC) &= \frac{40.3}{f_{\nu}^2}STEC + \frac{7525\M b^T\M k}{f_{\nu}^3}STEC + \frac{r}{f_{\nu}^4}STEC^2 \\
\text{ionosphere}([L\nu], STEC) &= -\frac{40.3}{f_{\nu}^2}STEC - \frac{7525\M b^T\M k}{2f_{\nu}^3}STEC - \frac{r}{3f_{\nu}^4}STEC^2 + \text{bending}(E)STEC^2
\end{split}
\]The second order term depends on the magnetosphere $\M b$
and the direction of the signal $\M k$.
If further information about the ionosphere is available
(in the form of a prior model or as additional parametrizations
such as parametrization:ionosphereMap or
parametrization:ionosphereVTEC) the STEC
parameters describe local and short–term scintillations. The STEC parameters are estimated
as additions to the model and it is advised to constrain them towards zero
with a standard deviation of sigmaSTEC.
expr. for sigma [TECU] for STEC constraint, variable E (elevation) available
IonosphereVTEC
The influence of the ionosphere is modelled by a VTEC parameter (vertical total electron content)
in terms of $[TECU]$ for every selected receiver at each epoch. Optionally, VTEC gradients in the
North (x) and East (y) direction can be estimated via gradient.
The slant TEC is computed based on the VTEC and the optional North and East gradients $\Delta V_x$ and $\Delta V_y$
using the elevation-dependent Modified Single-Layer Model (MSLM) mapping function
\[\label{gnssParametrizationType:IonosphereVTEC:STEC}
STEC = \frac{VTEC + \cos(A) \Delta V_x + \sin(A) \Delta V_y}{\cos z'}
\qquad\text{with}\qquad
\sin z'= \left(\frac{R}{R+H}\right)\sin\left(\alpha(\pi/2-E)\right)
\]inserted into eq. \eqref{gnssParametrizationType:IonosphereSTEC:STEC},
where $A$ is the azimuth angle and $E$ is the elevation angle.
This class provides a simplified model of the ionosphere for single receivers
and enables the separation of the TEC and signal biases, meaning
parametrization:tecBiases becomes estimable.
Local and short-term scintillations should be considered by adding loosely constrained
parametrization:ionosphereSTEC.
The ionosphere is parametrized in terms of $[TECU]$ in a single layer sphere with
radiusIonosphericLayer as a temporally
changing (e.g. hourly linear splines) spherical harmonics expansion
\[
VTEC(\lambda,\theta,t) = \sum_{n=0}^{n_{max}} \sum_{m=0}^n c_{nm}(t)C_{nm}(\lambda,\theta)+s_{nm}(t)S_{nm}(\lambda,\theta)
\]up to maxDegree=15 in a solar-geomagnetic frame defined
by magnetosphere. The VTEC values are mapped to STEC values
in the observation equations via eq. \eqref{gnssParametrizationType:IonosphereVTEC:STEC}.
Clock errors are estimated epoch-wise for each selectTransmitters/Receivers.
No clock errors are estimated if no valid observations are available (e.g. data gaps in the observations).
If all transmitters and receivers are selected by selectTransmitters and selectReceivers respectively,
these parameters will be linearly dependent which would lead to a rank deficiency in the normal equation
matrix. To circumvent this issue, the estimation requires an additional zero-mean constraint added in each epoch.
This is realized with an additional observation equation
\[
0 = \frac{1}{n_i + n_k} (\sum_i \Delta t^{s_i} + \sum_k \Delta t_{r_k})
\]summed over all selectTransmitters/ReceiversZeroMean
with a standard deviation of sigmaZeroMeanConstraint.
The clock error of an epoch can be predicted by the clock error
of the preceding epoch and an unknown clock drift
\[
\Delta t_{i+1} = \Delta t_{i} + t_{drift} dt + \epsilon_i.
\]This equation is applied as an additional constraint equation in each epoch
\[
0 = \Delta t_{i+1} - \Delta t_{i} - t_{drift} dt + \epsilon_i.
\]The variance $\sigma^2(\epsilon)$ is estimated iteratively by variance component estimation (VCE).
Clock jumps are treated as outliers and are automatically downweighted as described in
GnssProcessing:processingStep:estimate.
The absolute initial clock error and clock drift cannot be determined if all receiver
and transmitter clocks are estimated together due to their linear dependency.
This linear dependency would lead to a rank deficiency in the normal equation matrix in the same
manner as described in parametrization:clocks.
To circumvent this issue, an additional zero-mean constraint is added in each epoch
as observation equation
\[
0 = \frac{1}{n_i + n_k} (\sum_i \Delta t^{s_i} + \sum_k \Delta t_{r_k})
\]summed over all selectTransmitters/ReceiversZeroMean.
This should be a loose constraint with a relatively large standard deviation of sigmaZeroMeanConstraint.
The parameter names are <station or prn>:clock::<time>
and <station or prn>:clockDrift::.
(0 = unconstrained) sigma [m] for zero-mean constraint over all selected clocks
SignalBiases
Each code and phase observation (e.g. C1C or L2W) contains a bias at transmitter/receiver level
\[
[\tau\nu a]_r^s(t) = \dots + \text{bias}[\tau\nu a]^s + \text{bias}[\tau\nu a]_r + \dots
\]This class provides the apriori model $\M f(\M x_0)$ of eq. \eqref{gnssParametrizationType:model} only.
The estimation of the biases is complex due to different linear dependencies, which
result in rank deficiencies in the system of normal equations.
For simplification the parametrization for $\Delta\M x$ has been split into:
parametrization:codeBiases,
parametrization:tecBiases, and
parametrization:ambiguities (including phase biases).
The file handling on the other hand still remains within this class. Any prior
values for the transmitter/receiver biases are read with the respective inputfileSignalBiasTransmitter/Receiver.
All biases for a transmitter/receiver are accumulated and written to the respective outputfileSignalBiasTransmitter/Receiver.
Sets up an ambiguity parameter for each track and phase observation type.
\[
[L\nu a]_r^s(t) = \dots + \text{bias}[L\nu a]^s + \text{bias}[L\nu a]_r + \lambda[L\nu] N[L\nu a]_r^s
\]As the phase observations contain a float bias at transmitter/receiver level, not all ambiguities
are resolvable to integer values. The number of resolvable ambiguities can be increased with
known phase biases read from file via parametrization:signalBiases.
In this case, estimateTransmitter/ReceiverPhaseBias should
not be used for the corresponding transmitters and receivers.
In case of GLONASS, the phase biases at receiver level differ between different frequency channels
(frequency division multiple access, FDMA) and for each channel an extra float phase bias is estimated.
With linearGlonassBias a linear relationship between bias and frequency channel is assumed,
which reduces the number of float bias parameters and increases the number of resolvable integer ambiguities.
Each code observation (e.g. C1C or C2W) contains a bias at transmitter/receiver level
\[
[C\nu a]_r^s(t) = \dots + \text{bias}[C\nu a]^s + \text{bias}[C\nu a]_r + \dots
\]The code biases cannot be estimated together with clock errors and ionospheric delays in an absolute sense
as rank deficiencies will occur in the system of normal equations. Therefore, the biases are not initialized and set up
as parameters directly but only estimable linear combinations are parametrized.
The basic idea is to set up simplified normal equations with the biases,
clock and STEC parameters of one single receiver or transmitter,
eliminate clock and STEC parameters and perform an eigen value decomposition
of the normal equation matrix
\[
\M N = \M Q \M\Lambda \M Q^T.
\]Instead of estimating the original bias parameter $\M x$ a transformed set $\bar{\M x}$
is introduced:
\[
\bar{\M x} = \M Q^T \M x.
\]The new parameters corresponding to eigen values $\lambda>0$ are estimable,
the others are left out (set to zero). The behavior can be controlled by explicitly setting up to two bias types
with typesClockDatum for each transmitter to zero. These then define the ionosphere-free clock datum
of the transmitter. The missing linear combinations,
which depend on the STEC parameters, can be added with
parametrization:tecBiases.
Additional rank deficiencies may also occur when biases of transmitters and receivers are estimated together.
The minimum norm nullspace (also via eigen value decomposition)
is formulated as zero constraint equations and added with a standard deviation of sigmaZeroMeanConstraint.
In case of GLONASS the code biases at receiver level can differ between different frequency channels
(frequency division multiple access, FDMA) and for each channel an extra code bias is estimated.
With linearGlonassBias a linear relationship between bias and frequency channel is assumed,
which reduces the number of bias parameters.
first two matching types define the ionosphere free transmitter clock (e.g. C1WG, C2WG)
nameConstraint
string
used for parameter selection
sigmaZeroMeanConstraint
double
(0 = unconstrained) sigma [m] for null space constraint
TecBiases
Each code observation (e.g. C1C or C2W) contains a bias at transmitter/receiver level
\[
[C\nu a]_r^s(t) = \dots + \text{bias}[C\nu a]^s + \text{bias}[C\nu a]_r + \ldots
\]This parametrization represents the linear combination of signal biases
which completely depend on the STEC parameters. Ignoring these bias combinations would result
in a biased STEC estimation (all other parameters are nearly unaffected).
To determine this part of the signal biases
the parametrization:ionosphereSTEC should be constrained.
Furthermore, additional information about the ionosphere is required from
parametrization:ionosphereVTEC or
parametrization:ionosphereMap.
Rank deficiencies due to the signal bias parameters may occur if biases of
transmitters and receivers are estimated together.
The minimum norm nullspace is formulated as zero constraint equations and added with
a standard deviation of sigmaZeroMeanConstraint.
phase or code biases depend linear on frequency channel number
nameConstraint
string
used for parameter selection
sigmaZeroMeanConstraint
double
(0 = unconstrained) sigma [m] for null space constraint
TemporalBias
This parametrization resolves the issue of some phase observations suffering from time-variable biases.
Such a phenomenon has been found to affect GPS block IIF satellites on the L5 phase measurements
(see Montenbruck et al. 2011, DOI: 10.1007/s10291-011-0232-x).
For these time-variable biases an appropriate temporal representation has to be defined in
parametrizationTemporal.
For example, time-variable biases for GPS block IIF L5 phase observations (type=L5*G)
can be represented by a cubic spline with a nodal distance of one hour.
This parametrization should be set up in addition to the constant
parametrization:signalBiases.
Depending on the temporal representation a temporal zero-mean constraint is needed
to separate this parametrization from the constant component. The constraint equations are added with
a standard deviation of sigmaZeroMeanConstraint.
The parameter names are
<prn>:signalBias.<gnssType>:<temporal>:<interval>.
(0 = unconstrained) sigma [m] for temporal zero-mean constraint
StaticPositions
Estimates a static position for all
selectReceivers in the terrestrial frame.
No-net constraints can be applied for a subset of stations,
selectNoNetReceivers, with a
standard deviation of noNetTranslationSigma and noNetRotationSigma and noNetScaleSigma.
If the template inputfileNoNetPositions is provided
the constraints are applied relatively to these positions. Only stations with an existing position file
are considered. Without inputfileNoNetPositions
the constraints are applied towards the apriori values from
GnssProcessing:receiver.
As a single corrupted station position can disturb the no-net conditions,
the rotation/translation parameters are estimated in a
robust least squares adjustment
beforehand. The computed weight matrix is used to downweight corrupted stations
in the constraint equations.
In case you want to align to an ITRF/IGS reference frame, precise coordinates can be
generated with Sinex2StationPositions.
The estimation of (reduced) dynamic orbits is formulated as variational equations.
It is based on inputfileVariational calculated with PreprocessingVariationalEquation.
Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree.
The parametrizationAcceleration must include at least those
parameters that were estimated in PreprocessingVariationalEquationOrbitFit.
Additional stochasticPulse parameters can be set up to reduce orbit mismodeling.
If not enough epochs with observations are available (minEstimableEpochsRatio) the LEO satellite is disabled.
drop satellites with lower ratio of estimable epochs to total epochs
integrationDegree
uint
integration of forces by polynomial approximation of degree n
interpolationDegree
uint
for orbit interpolation and velocity calculation
Troposphere
A priori tropospheric correction is handled by a troposphere model (e.g. Vienna Mapping Functions 3).
Additional parameters in $[m]$ for zenith wet delay and gradients can be set up via
troposphereWetEstimation (usually 2-hourly linear splines)
and troposphereGradientEstimation (usually a daily trend).
These parameters can be soft-constrained using
parametrization:constraints
to avoid an unsolvable system of normal equations in case of data gaps.
Constraints on the defined parameters can be added via
parametrization:constraints.
An example would be to set up estimateUT1:constant
so the $dUT1$ parameter is included in the normal equation system. Since $dUT1$ cannot be
determined by GNSS, a hard constraint to its a priori value can then be added.
This class parametrizes the antennas in terms of their antenna center offsets (ACO) and
antenna center variations (ACV) by antennaCenterVariations.
The receivers to be estimated can be selected by selectReceivers.
The amount of patterns to be estimated is configurable with a list of patternTypes.
For each added patternTypes a set of parameters will be evaluated. The observations
will be assigned to the first patternTypes that matches their own.
E.g. having the patterns: ***G and L1* would lead to all GPS observations being assigned
to the observation equations of the first pattern. The pattern type L1* would then consist
of all other GNSS L1 phase observations. addNonMatchingTypes will, if activated, create automatically patterns
for observations that are not selected within the list patternTypes.
Furthermore, it is possible to group same antenna build types from different receivers by groupAntennas.
The grouping by same antenna build ignores antenna serial numbers.
Note that the apriori value $\M x_0$ for this parametrization is always zero and never updated
according to eq. \eqref{gnssParametrizationType:update}.
The parameter names are
<antennaName>:<antennaCenterVariations>.<gnssType>::.
add patterns for additional observed gnssTypes that don't match any of the above
groupAntennas
boolean
common ACVs for same antenna build types (ignores antenna serial number)
Constraints
Add a pseudo observation equation (constraint)
for each selected parameter
\[
b-x_0 = 1 \cdot dx + \epsilon,
\]where $b$ is the bias and $x_0$ is the a priori value of the parameter
if relativeToApriori is not set.
The standard deviation sigma is used to weight the observation equations.
Processing steps enable a dynamic definition of the consecutive steps performed during any kind of GNSS processing.
The most common steps are estimate, which performs an iterative least
squares adjustment, and writeResults, which writes all output files
defined in GnssProcessing and is usually the last step.
Some steps such as selectParametrizations,
selectEpochs,
selectNormalsBlockStructure, and
selectReceivers affect all subsequent steps.
In case these steps are used within a group or
forEachReceiverSeparately step,
they only affect the steps within this level.
Iterative non-linear least squares adjustment.
In every iteration it accumulates the system of normal equations, solves the system and updates the estimated parameters.
The estimated parameters serve as a priori values in the next iteration and the following processing steps.
Iterates until either every single parameter update (converted to an influence in meters)
is below a convergenceThreshold or maxIterationCount is reached.
With computeResiduals the observation equations are computed
again after each update to compute the observation residuals.
The overall standard deviation of a single observation used for the weighting
is composed of several factors
\[
\hat{\sigma}_i = \hat{\sigma}_i^{huber} \hat{\sigma}_{[\tau\nu a]}^{recv} \sigma_{[\tau\nu a]}^{recv}(E,A),
\]where $[\tau\nu a]$ is the signal type, the azimuth and elevation dependent $\sigma_{[\tau\nu a]}^{recv}(E,A)$ is given by
receiver:inputfileAccuracyDefinition and the other factors are
estimated iteratively from the residuals.
With computeWeights a standardized variance $\hat{s}_i^2$
for each residual $\hat{\epsilon}_i$ is computed
\[
\hat{s}_i^2 = \frac{1}{\hat{\sigma}_{[\tau\nu a]}^{recv} \sigma_{[\tau\nu a]}^{recv}(E,A)}\frac{\hat{\epsilon}_i^2}{r_i}
\qquad\text{with}\qquad
r_i = \left(\M A\left(\M A^T\M A\right)^{-1}\M A^T\right)_{ii}
\]taking the redundancy $r_i$ into account. If $\hat{s}_i$ is above a threshold huber
the observation gets a higher standard deviation used for weighting according to
\[
\hat{\sigma}_i^{huber} =
\left\{ \begin{array}{ll}
1 & \hat{s}_i < huber,\\
(\hat{s}_i/huber)^{huberPower} & \hat{s}_i \ge huber
\end{array} \right.,
\]similar to robust least squares adjustment.
With adjustSigma0 individual variance factors can be computed
for each station and all phases of a system and each code observation type
(e.g. for each L**G, L**E, C1CG, C2WG, C1CE, … )
separately
\[
\hat{\sigma}_{[\tau\nu a]}^{recv} = \sqrt{\frac{\hat{\M\epsilon}^T\M P\hat{\M\epsilon}}{r}}.
\]
Name
Type
Annotation
computeResiduals
boolean
adjustSigma0
boolean
adjust sigma0 by scale factor (per receiver and type)
[m] stop iteration once full convergence is reached
maxIterationCount
uint
maximum number of iterations
ResolveAmbiguities
Performs a least squares adjustment like processingStep:estimate
but with additional integer phase ambiguity resolution.
After this step all resolved ambiguities are removed from the normal equation system.
Only ambiguities involving selectTransmitters/Receivers are resolved.
If selectTransmitters/Receivers is not set, all usable transmitters and/or
receivers are selected for ambiguity resolution.
Integer ambiguity resolution is performed based on the least squares ambiguity decorrelation adjustment
(LAMBDA) method (Teunissen 1995, DOI 10.1007/BF00863419), specifically
the modified algorithm (MLAMBDA) by Chang et al. (2005, DOI 10.1007/s00190-005-0004-x).
First the covariance matrix of the integer ambiguity parameters is computed by eliminating all but those parameters
from the full normal equation matrix and inverting it. Then, a Z-transformation is performed as described by
Chang et al. (2005) to decorrelate the ambiguity parameters without losing their integer nature.
The search process follows MLAMBDA and uses integer minimization of the weighted sum of squared residuals.
It is computationally infeasible to search a hyper-ellipsoid with a dimension of ten thousand or more.
Instead, a blocked search algorithm is performed by moving a window with a length of, for example,
searchBlockSize=200 parameters over the decorrelated ambiguities, starting from the most accurate.
In each step, the window is moved by half of its length and the overlapping parts are compared to each other.
If all fixed ambiguities in the overlap agree, the algorithm continues.
Otherwise, both windows are combined and the search is repeated using the combined window, again comparing with the overlapping
part of the preceding window. If not all solutions could be checked for a block after maxSearchSteps,
the selected incompleteAction is performed.
If the algorithm reaches ambiguities with a standard deviation higher than sigmaMaxResolve,
ambiguity resolution stops and the remaining ambiguities are left as float values.
Otherwise, all ambiguity parameters are fixed to integer values.
In contrast to an integer least squares solution over the full ambiguity vector, it is not guaranteed that the resulting solution
is optimal in the sense of minimal variance with given covariance.
This trade-off is necessary to cope with large numbers of ambiguities.
Accumulates the normal equations and computes the covariance matrix as inverse of the normal matrix.
It is not the full inverse but only the elements which are set in the normal matrix
(see gnssProcessingStep:selectNormalsBlockStructure)
are computed. The matrix is passed to the parametrizations.
Only used in parametrizations:kinematicPositions
to get the epoch-wise covariance information at the moment.
It is usually the last processing step, but can also be used at other points in the
processing in combination with suffix to write intermediate results, for example
before gnssProcessingStep:resolveAmbiguities to
output the float solution.
Name
Type
Annotation
suffix
string
appended to every output file name (e.g. orbit.G01.suffix.dat)
WriteNormalEquations
Accumulates the normal equations matrix and writes it.
If remainingParameters
is set only the selected parameters are written to the normal equations
and all other parameters are eliminated beforehand (implicitly solved).
parameter order/selection of output normal equations
WriteResiduals
Writes the observation residuals for all
selectReceivers.
For each station a file is written. The file name is interpreted as
a template with the variable {station} being replaced by the station name.
Enable/disable parameter groups and constraint groups for subsequent steps,
e.g. processingStep:estimate or
processingStep:writeResults.
The name and nameConstraint of these groups
are defined in parametrizations.
Prior models or previously estimated parameters used as new apriori $\M x_0$ values are unaffected
and they are always reduced from the observations. This means all unselected parameters are kept fixed
to their last result.
An example would be to process at a 5-minute sampling using
processingStep:selectEpochs
and then at the end to densify the clock parameters to the full 30-second observation sampling
while keeping all other parameters fixed
(disable=*, enable=*.clock*, enable=parameter.STEC).
Name
Type
Annotation
parametrization
choice
enable
sequence
name
string
wildcards: * and ?
disable
sequence
name
string
wildcards: * and ?
SelectEpochs
Select epochs for subsequent steps. This step can be used to reduce the processing sampling
while keeping the original observation sampling for all preprocessing steps (e.g. outlier and cycle slip detection).
Another example is to process at a 5-minute sampling by setting nthEpoch=10 and then
at the end to densify only the clock parameters to the full 30-second observation sampling by
setting nthEpoch=1 while keeping all other parameters fixed
with processingStep:selectParametrizations.
Name
Type
Annotation
nthEpoch
uint
use only every nth epoch in all subsequent processing steps
SelectNormalsBlockStructure
Select block structure of sparse normal equations for subsequent steps.
This step can be used to define the structure of the different parts of the normal equation system,
which can have a major impact on computing performance and memory consumption depending on the processing setup.
Figure: Structure of normal equations in GNSS processing
The normal equation system is divided into three parts for epoch, interval, and ambiguity parameters.
The epoch part is subdivided further into one subpart per epoch. Each part is divided into blocks and only non-zero
blocks are stored in memory to reduce memory consumption and to prevent unnecessary matrix computations.
defaultBlockSizeEpoch, defaultBlockSizeInterval, and defaultBlockSizeAmbiguity control
the size of the blocks within each part of the normal equations. defaultBlockReceiverCount can be set to group
a number of receivers into one block within the epoch and interval parts.
If keepEpochNormalsInMemory=no epoch blocks are eliminated after they are set up to reduce the number
of parameters in the normal equation system. defaultBlockCountReduction controls after how many epoch blocks
an elimination step is performed. For larger processing setups or high sampling rates epoch block elimination is recommended
as the large number of clock parameters require a lot of memory.
Name
Type
Annotation
defaultBlockSizeEpoch
uint
block size of epoch parameters, 0: one block
defaultBlockSizeInterval
uint
block size of interval parameters, 0: one block
defaultBlockSizeAmbiguity
uint
block size of ambiguity parameters, 0: one block
defaultBlockReceiverCount
uint
number of receivers to group into one block for epoch and interval
defaultBlockCountReduction
uint
minimum number of blocks for epoch reduction
keepEpochNormalsInMemory
boolean
speeds up processing but uses much more memory
accumulateEpochObservations
boolean
set up all observations per epoch and receiver at once
SelectReceivers
This step can be used to process only a subset of stations in subsequent processing steps.
The most common use is to start the processing with a well-distributed network of core stations as seen in
GNSS satellite orbit determination and network analysis.
To later process all other stations individually, use the processing step
processingStep:forEachReceiverSeparately
and select all stations excluding the core stations in that step.
Perform these processing steps for each selectReceivers separately.
All non-receiver-related parameters are disabled in these processing steps.
Perform these processing steps. This step can be used to structure complex processing flows.
The processingSteps that affect the following steps
(those beginning with Select) only have an effect until the end of the group.
Definition and basic information of GNSS receivers.
Most of the input files are provided in GROOPS file formats at
https://ftp.tugraz.at/pub/ITSG/groops (marked with * below).
These files are regularly updated.
It is possible to limit the observation types to be used in the processing by a list of useType
and any observation types not defined within the list are ignored and discarded.
Similarly, observations defined in the list of ignoreType are ignored and discarded.
The codes used follow the RINEX 3 definition.
Each receiver goes through a preprocessing step individually, where observation outliers are removed or downweighted,
continuous tracks of phase observations are defined for ambiguity parametrization, cycle slips are detected, and receivers are
disabled if they do not fulfill certain requirements. The preprocessing step consists of an initial PPP estimation done by
robust least squares adjustment and checks whether the position error
of the solutions exceeds codeMaxPositionDiff. If the error exceeds the threshold the receiver will be discarded.
The preprocessing also sets initial clock error values and removes tracks that stay below a certain elevation mask (elevationTrackMinimum).
A network of GNSS ground stations is defined via inputfileStationList.
Each line can contain more than one station. The first station in each line for which inputfileObservations
exists and contains enough observations is used for the processing.
All input files except inputfileAntennaDefinition,
inputfileReceiverDefinition, and
inputfileAccuracyDefinition are read for each station.
The file name is interpreted as a template with the variable {station} being replaced by the station name.
The effects of loading and tidal deformation on station positions can be corrected for
via loadingDisplacement and
tidalDisplacement, respectively.
Tidal deformations typically include:
[m] max. allowed position error by PPP code only clock error estimation
denoisingLambda
double
regularization parameter for total variation denoising used in cycle slip detection
tecWindowSize
uint
(0 = disabled) window size for TEC smoothness evaluation used in cycle slip detection
tecSigmaFactor
double
factor applied to moving standard deviation used as threshold in TEC smoothness evaluation during cycle slip detection
outputfileTrackBefore
filename
variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track before cycle slip detection
outputfileTrackAfter
filename
variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track after cycle slip detection
LowEarthOrbiter
A single low-Earth orbiting (LEO) satellite with an onboard GNSS receiver.
An apriori orbit is needed as inputfileOrbit.
Attitude data must be provided via inputfileStarCamera.
If no attitude data is available from the satellite operator,
the star camera data can be simulated by using SimulateStarCamera.
Name
Type
Annotation
inputfileStationInfo
filename
satellite metadata (antenna, receiver, ...)
inputfileAntennaDefinition
filename
antenna center offsets and variations
noAntennaPatternFound
choice
what should happen if no antenna pattern is found for an observation
ignoreObservation
ignore observation if no matching pattern is found
useNearestFrequency
use pattern of nearest frequency if no matching pattern is found
[m] max. allowed position error by PPP code only clock error estimation
denoisingLambda
double
regularization parameter for total variation denoising used in cycle slip detection
tecWindowSize
uint
(0 = disabled) window size for TEC smoothness evaluation used in cycle slip detection
tecSigmaFactor
double
factor applied to moving standard deviation used as threshold in TEC smoothness evaluation during cycle slip detection
outputfileTrackBefore
filename
variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track before cycle slip detection
outputfileTrackAfter
filename
variables {station}, {prn}, {trackTimeStart}, {trackTimeEnd}, {types}, TEC and MW-like combinations in cycles for each track after cycle slip detection
Metadata input files (marked with * below) are provided in GROOPS file formats at
https://ftp.tugraz.at/pub/ITSG/groops. These files are regularly updated.
A GnssType string consists of six parts (type, frequency, attribute, system, PRN, frequency number)
represented by seven characters.
The first three characters (representing type, frequency, and attribute) correspond to the observation codes of the
RINEX 3 definition.
The satellite system character also follows the RINEX 3 definition:
G = GPS
R = GLONASS
E = Galileo
C = BeiDou
S = SBAS
J = QZSS
I = IRNSS
PRN is a two-digit number identifying a satellite.
Frequency number is only used for GLONASS, where the range -7 to 14 is represented by letters starting with A.
Each part of a GnssType string can be replaced by a wildcard '*', enabling the use of these strings as patterns,
for example to select a subset of observations (e.g. C**G** matches all GPS code/range observations).
Trailing wildcards are optional, meaning L1*R is automatically expanded to L1*R***.
For some RINEX 2 types (e.g. Galileo L5) the RINEX 3 attribute is unknown/undefined and can be replaced by ?,
for example L5?E01.
This class computes functionals of the time depending gravity field,
e.g potential, gravity anomalies or gravity gradients.
If several instances of the class are given the results are summed up.
Before summation every single result is multiplied by a factor.
To subtract a normal field like GRS80 from a potential
to get the disturbance potential you must set one factor to 1
and the other to -1. To get the mean of two fields just set each factor to 0.5.
Some of the instances also give information about the accuracy.
The variance of the result (sum) is computed by means of variance propagation.
PotentialCoefficients
Reads coefficients of a spherical harmonics expansion from file.
The potential is given by
\[
V(\lambda,\vartheta,r) = \frac{GM}{R}\sum_{n=0}^\infty \sum_{m=0}^n \left(\frac{R}{r}\right)^{n+1}
\left(c_{nm} C_{nm}(\lambda,\vartheta) + s_{nm} S_{nm}(\lambda,\vartheta)\right).
\]If set the expansion is limited in the range between minDegree
and maxDegree inclusively. The computed result
is multiplied with factor. If setSigmasToZero is true
the variances are set to zero. This option is only important for variance propagation
and does not change the result of the gravity field functionals.
Name
Type
Annotation
inputfilePotentialCoefficients
filename
minDegree
uint
maxDegree
uint
factor
double
the result is multiplied by this factor, set -1 to subtract the field
setSigmasToZero
boolean
set variances to zero, should be used by adding back reference fields
PotentialCoefficientsInterior
Reads coefficients of a spherical harmonics expansion (for inner space) from file.
If set the expansion is limited in the range between minDegree
and maxDegree inclusively. The computed result is multiplied with factor.
If setSigmasToZero is true the variances are set to zero.
This option is only important for error propagation
and does not change the result of the gravity field functionals.
Name
Type
Annotation
inputfilePotentialCoefficients
filename
minDegree
uint
maxDegree
uint
factor
double
the result is multiplied by this factor, set -1 to subtract the field
setSigmasToZero
boolean
set variances to zero, should be used by adding back reference fields
FromParametrization
Reads a solution vector from file inputfileSolution
which may be computed by a least squares adjustment (e.g. by NormalsSolverVCE).
The coefficients of the vector are interpreted from position indexStart
(counting from zero) with help of parametrizationGravity.
If the solution file contains solution of several right hand sides you can choose
one with number rightSide (counting from zero).
You can also read a vector from file inputfileSigmax
containing the accuracies of the coefficients.
standard deviations or covariance matrix of the solution
indexStart
uint
position in the solution vector
rightSide
uint
if solution contains several right hand sides, select one
factor
double
the result is multiplied by this factor, set -1 to subtract the field
TimeSplines
Read a time variable gravity field from file
inputfileTimeSplinesGravityfield
represented by a spherical harmonics expansion in the spatial domain and spline functions
in the time domain. If set the expansion is limited in the range between
minDegree and maxDegree inclusively.
the result is multiplied by this factor, set -1 to subtract the field
Trend
The given gravityfield is interpreted
as trend function and the result is computed at time $t$ as follows
\[
V(\M x,t) = \frac{t-t_0}{\Delta t}V(\M x),
\]with $t_0$ is timeStart and $\Delta t$ is timeStep.
this field is multiplied by (time-time0)/timeStep
timeStart
time
reference time
timeStep
time
Oscillation
The given gravityfield is interpreted
as oscillation function and the result is computed at time $t$ as follows
\[
V(\M x,t) = \cos(\omega)V_{cos}(\M x)+\sin(\omega)V_{sin}(\M x),
\]with $\omega=\frac{2\pi}{T}(t-t_0)$.
A gravityfield is only evaluated in the interval between
timeStart inclusively and timeEnd exclusively.
Outside the interval the result is zero.
This class is useful to get a time series of monthly mean GRACE gravity field solutions.
In each month another file of potentialCoefficients is valid.
This can easily be created with loop.
Treat tides as gravitational forces.
The tides need a realization of earthRotation
to transform between the CRF and TRF and to compute rotational deformation
from polar motion.
It also needs ephemerides from Sun, moon, and planets.
The gravity is integrated from a topographic mass distribution.
For each grid point in inputfileGridRectangular a prism with
density is assumed. The horizontal extension is computed from the grid spacing
and the vertical extension is given by radialLowerBound
and radialUpperBound above ellipsoid. All values are expressions and computed
for each point with given data in the grid file. The standard variables for grids
are available, see dataVariables.
Example: The grid file contains the orthometric height of the topography in the first
column, the geoid height in the second and the mean density of each prism in the third
column. In this case the following settings should be used:
radialUpperBound = data0+data1,
radialLowerBound = data1,
density = data2.
As the prism computation is time consuming, a maximum distance around the evaluation point
can be defined with distancePrism. Afterwards a simplified radial line
(the prism mass is concentrated to a line in the center) is used up to
a distance of distanceLine. At last the prism is approximated by a point mass
in the center up to a distance distanceMax (if set). Prisms nearby the evaluation
point can be excluded with distanceMin.
Name
Type
Annotation
inputfileGridRectangular
filename
Digital Terrain Model
density
expression
expression [kg/m**3]
radialUpperBound
expression
expression (variables 'height', 'data', 'L', 'B' and, 'area' are taken from the gridded data
radialLowerBound
expression
expression (variables 'height', 'data', 'L', 'B' and, 'area' are taken from the gridded data
distanceMin
double
[km] min. influence distance (ignore near zone)
distancePrism
double
[km] max. distance for prism formula
distanceLine
double
[km] max. distance for radial integration
distanceMax
double
[km] max. influence distance (ignore far zone)
factor
double
the result is multiplied by this factor, set -1 to subtract the field
EarthquakeOscillation
The given gravityfield is interpreted as an oscillation function
in the gravitational potential field, caused by large earthquakes.
The result is computed at time $t$ as follows:
\[
C_{lm}(\M t) = \sum_{n=0}^NC_{nlm}(1-\cos(\omega)\exp(\frac{-\omega}{2Q_{nlm}})),
\]with $\omega=\frac{2\pi}{T_{nlm}}(t-t_0)$. In this equation, $Q_{nlm}$ is the attenuation factor,
$n$ is the overtone factor, $l$ is the degree, $m$ is the order, and $t$ is the time in seconds.
$T_{nlm}$ and $Q_{nlm}$ are computed with the elastic Earth model or observed from the long
period record of superconducting gravimeter measurements after the earthquakes.
Name
Type
Annotation
inputCoefficientMatrix
filename
oscillation model parameters
time0
time
the time earthquake happened
minDegree
uint
maxDegree
uint
GM
double
Geocentric gravitational constant
R
double
reference radius
Filter
Convert gravityfield to spherical harmonics
and filter the coefficients.
This class generates a set of grid points. In a first step, the grid
is always generated globally, with border a regional
subset of points can be extracted from the global grid. The parameters
R and inverseFlattening define the shape of the ellipsoid
on which the grid is generated. In case inverseFlattening is
chosen as zero, a sphere is used. With height the distance of
the points above the ellipsoid can be defined. In addition to the location
of the points, weights are assigned to each of the points. These weights
can be regarded as the surface element associated with each grid point.
Geograph
The geographical grid is an equal-angular point distribution with points
located along meridians and along circles of latitude. deltaLambda
denotes the angular difference between adjacent points along meridians and
deltaPhi describes the angular difference between adjacent points
along circles of latitude. The point setting results as follows:
\[
\lambda_i=\frac{\Delta\lambda}{2}+i\cdot\Delta\lambda\qquad\mbox{with}\qquad 0\leq i< \frac{360^\circ}{\Delta\lambda},
\]\[
\varphi_j=-90^\circ+\frac{\Delta\varphi}{2}+j\cdot\Delta\varphi\qquad\mbox{with}\qquad 0\leq j<\frac{180^\circ}{\Delta\varphi}.
\]The number of grid points can be determined by
\[
I=\frac{360^\circ}{\Delta\lambda}\cdot\frac{180^\circ}{\Delta\varphi}.
\]The weights are calculated according to
\[
w_i=\int\limits_{\lambda_i-\frac{\Delta\lambda}{2}}^{\lambda_i+\frac{\Delta\lambda}{2}}\int\limits_{\vartheta_i-\frac{\Delta\vartheta}{2}}^{\vartheta_i+\frac{\Delta\vartheta}{2}}\sin\vartheta\,d\vartheta\,d\lambda=2\,\Delta\lambda\sin\left(\frac{\Delta\vartheta}{2}\right)\sin(\vartheta_i).
\]
The zeroth level of densification
coincides with the 12 icosahedron vertices, as displayed in the upper left part
of Fig. fig:triangle_grid. Then, depending on the envisaged densification,
each triangle edge is divided into $n$ parts, illustrated in the upper right
part of Fig. fig:triangle_grid. The new nodes on the edges are then connected
by arcs of great circles parallel to the triangle edges. The intersections of
each three corresponding parallel lines become nodes of the densified grid as well.
As in case of a spherical triangle those three connecting lines do not exactly
intersect in one point, the center of the resulting triangle is used as location
for the new node (lower left part of Fig. fig:triangle_grid). The lower right
side of Fig. fig:triangle_grid finally shows the densified triangle vertex
grid for a level of $n=3$. The number of grid points in dependence of the chosen
level of densification can be calculated by
\[\label{eq:numberVertex}
I=10\cdot(n+1)^2+2.
\]
Figure: TriangleVertex grid.
Name
Type
Annotation
level
uint
division of icosahedron, point count = 10*(n+1)**2+2
The points of the zeroth level are located at the centers of the icosahedron triangles.
To achieve a finer grid, each of the triangles is divided into four smaller triangles by
connecting the midpoints of the triangle edges. The refined grid points are again located
at the center of the triangles. Subsequently, the triangles can be further densified up to
the desired level of densification $n$, which is defined by level.
The number of global grid points for a certain level can be determined by
\[\label{eq:numberCenter}
I=20\cdot 4^n.
\]Thus the quantity of grid points depends exponentially on the level $n$, as with
every additional level the number of grid points quadruplicates.
The grid features equiangular spacing along circles of latitude with
parallelsCount defining the number $L$ of the parallels.
\[
\Delta\lambda=\frac{\pi}{L}\qquad\Rightarrow\qquad\lambda_i=\frac{\Delta\lambda}{2}+i\cdot\Delta\lambda\qquad\mbox{with}\qquad 0\leq i< 2L.
\]Along the meridians the points are located at $L$ parallels at
the $L$ zeros $\vartheta_j$ of the Legendre polynomial of degree $L$,
\[
P_L(\cos\vartheta_j)=0.
\]Consequently, the number of grid points sums up to
\[
I=2\cdot L^2.
\]The weights can be calculated according to
\[
w_i(L)=\Delta\lambda\frac{2}{(1-t_i^2)(P'_{L}(\cos(\vartheta _i)))^2},\label{weights}
\]
The Reuter grid features equi-distant spacing along the meridians determined
by the control parameter $\gamma$ according to
\[
\Delta\vartheta=\frac{\pi}{\gamma}\qquad\Rightarrow\vartheta_j=j\Delta\vartheta,\qquad\mbox{with}\qquad 1\leq j\leq \gamma-1.
\]Thus $\gamma+1$ denotes the number of points per meridian, as the two poles
are included in the point distribution as well. Along the circles of latitude,
the number of grid points decreases with increasing latitude in order to achieve
an evenly distributed point pattern. This number is chosen, so that the points
along each circle of latitude have the same spherical distance as two adjacent
latitudes. The resulting relationship is given by
\[\label{eq:sphericalDistance}
\Delta\vartheta=\arccos\left( \cos^2\vartheta_j+\sin^2\vartheta_j\cos\Delta\lambda_j\right).
\]The left hand side of this equation is the spherical distance between adjacent
latitudes, the right hand side stands for the spherical distance between two points
with the same polar distance $\vartheta_j$ and a longitudinal difference of
$\Delta\lambda_j$. This longitudinal distance can be adjusted depending on
$\vartheta_j$ to fulfill Eq. \eqref{eq:sphericalDistance}. The resulting
formula for $\Delta\lambda_j$ is
\[\label{eq:deltaLambdai}
\Delta\lambda_j=\arccos\left( \frac{\cos\Delta\vartheta -\cos^2\vartheta_j}{\sin^2\vartheta_j}\right).
\]The number of points $\gamma_j$ for each circle of latitude can then be determined by
\[\label{eq:gammai}
\gamma_j=\left[ \frac{2\pi}{\Delta\lambda_j}\right] .
\]Here the Gauss bracket $[x]$ specifies the largest integer equal to or less than $x$.
The longitudes are subsequently determined by
\[
\lambda_{ij}=\frac{\Delta\lambda_j}{2}+i\cdot(2\pi/\gamma_j),\qquad\mbox{with}\qquad 0\leq i< \gamma_j.
\]The number of grid points can be estimated by
\[\label{eq:numberReuter}
I\leq 2+\frac{4}{\pi}\gamma^2,
\]The $\leq$ results from the fact that the $\gamma_j$ are restricted to integer values.
This kind of grid distributes an arbitrarily chosen number of $I$ points
(defined by globalPointsCount) following a recursive, quasi random sequence.
In longitudinal direction the pattern follows
\[
\Delta\lambda=\frac{2\pi}{I}\qquad\Rightarrow\qquad\lambda_i=i\cdot\Delta\lambda-\frac{\Delta\lambda}{2}\qquad\mbox{with}\qquad 1\leq i\leq I.
\]This implies that every grid point features a unique longitude, with equi-angular
longitudinal differences.
The polar distance in the form $t_i=\cos\vartheta_i$ for each point is determined
by the following recursive sequence:
Starting from an interval $t\in[-1,1]$.
If $I=1$, then the midpoint of the interval is returned as result of
the sequence, and the sequence is terminated.
If the number of points is uneven, the midpoint is included into the list of $t_i$.
Subsequently, the interval is bisected into an upper and lower half,
and the sequence is called for both halves.
$t$ from upper and lower half are alternately sorted into the list of $t_i$.
The polar distances are calculated by
\[
\vartheta_i=\arccos\, t_i.
\]
The Driscoll-Healy grid, has equiangular spacing along the meridians as well
as along the circles of latitude. In longitudinal direction (along the parallels),
these angular differences for a given dimension $L$ coincide with those
described for the corresponding geographical grid and Gauss grid. Along the meridians,
the size of the latitudinal differences is half the size compared to the geographical
grid. This results in the following point pattern,
\[
\begin{split}
\Delta\lambda=\frac{\pi}{L}\qquad&\Rightarrow\qquad\lambda_i=\frac{\Delta\lambda}{2}+i\cdot\Delta\lambda\qquad&\mbox{with}\qquad 0\leq i< 2L, \\
\Delta\vartheta=\frac{\pi}{2L}\qquad&\Rightarrow\qquad\vartheta_j=j\cdot\Delta\vartheta\qquad&\mbox{with}\qquad 1\leq j\leq 2L.
\end{split}
\]Consequently, the number of grid points is
\[
I=4\cdot L^2.
\]The weights are given by
\[
w_i=\Delta\lambda\frac{4}{2L}\sin(\vartheta_i)\sum_{l=0}^{L-1}\frac{\sin\left[ (2l+1)\;\vartheta_i\right] }{2l+1}.
\]
GROOPS is a software toolkit for gravity field recovery, GNSS processing, and statistical analysis of time series and spatial data.
It is licensed under GPLv3 and hosted at GitHub.
GROOPS depends on data files such as Earth rotation, Love numbers, and GNSS meta information.
An initial data set that is regularly updated is available on our FTP server,
https://ftp.tugraz.at/pub/ITSG/groops.
You can choose between downloading the data directory or a single
data.zip with the same content.
Additional tutorial videos are available at our YouTube channel.
Questions about the usage of GROOPS can be posted in the
Discussions.
General Information
File formats and basic concepts for using GROOPS and its GUI.
This class resamples data of a time series to new points in time.
Polynomial
Polynomial prediction using a moving polynomial of polynomialDegree.
The optimal polynomial is chosen based on the centricity of the data points around the resampling
point and the distance to all polynomial data points. All polynomial data points must be within
maxDataPointRange. Resampling points within maxExtrapolationDistance of the
polynomial will be extrapolated. The elements maxDataPointRange and maxExtrapolationDistance
are given in the unit of seconds. If negative values are used, the unit is relative to the median input sampling.
Figure: Example of polynomial prediction when resampling from 5 to 1 minute sampling
Name
Type
Annotation
polynomialDegree
uint
degree of the moving polynomial
maxDataPointRange
double
[seconds] all degree+1 data points must be within this range for a valid polynomial
maxExtrapolationDistance
double
[seconds] resampling points within this range of the polynomial will be extrapolated
Least squares polynomial fit
A polynomial of polynomialDegree is estimated using all data points within
maxDataPointDistance of the resampling point. This polynomial is then used
to predict the resampling point. A resampling point will be extrapolated if there are
only data points before/after as long as the closest one is within maxExtrapolationDistance.
The elements maxDataPointDistance and maxExtrapolationDistance are given
in the unit of seconds. If negative values are used, the unit is relative to the median input sampling.
Figure: Example of least squares polynomial fit when resampling from 5 to 1 minute sampling
Name
Type
Annotation
polynomialDegree
uint
degree of the estimated polynomial
maxDataPointDistance
double
[seconds] all data points within this distance around the resampling point will be used
maxExtrapolationDistance
double
[seconds] resampling points within this range of the polynomial will be extrapolated
Fill gaps with least squares polynomial fit
Name
Type
Annotation
polynomialDegree
uint
degree of the estimated polynomial
maxDataGap
double
[seconds] max data gap to interpolate
maxDataSpan
double
[seconds] time span on each side used for least squares fit
Kernel defines harmonic isotropic integral kernels $K$.
\[
T(P) = \frac{1}{4\pi}\int_\Omega K(P,Q)\cdot f(Q)\,d\Omega(Q),
\]where $T$ is the (disturbance)potential and $f$ is a functional on the spherical surface $\Omega$.
The kernel can be expanded into a series of (fully normalized) Legendre polynomials
\[\label{eq.kernel}
K(\cos\psi,r,R) = \sum_n \left(\frac{R}{r}\right)^{n+1}
k_n\sqrt{2n+1}\bar{P}_n(\cos\psi).
\]On the one hand the kernel defines the type of the functionals $f$ that are measured
or have to be computed, e.g. gravity anomalies given by the Stokes-kernel.
On the other hand the kernel functions can be used as basis functions to represent
the gravity field, e.g. as spline functions or wavelets.
GeoidHeight
The geoid height is defined by Bruns formula
\[
N = \frac{1}{\gamma}T
\]with $T$ the disturbance potential and the normal gravity
\[\label{normalgravity}
\gamma = \gamma_0 - 0.30877\cdot 10^{-5}/s^2(1-0.00142\sin^2(B))h
\]and
\[
\gamma_0 = 9.780327\,m/s^2(1+0.0053024\sin^2(B)-0.0000058\sin^2(2B))
\]where $h$ is the ellipsoidal height in meter and $B$ the latitude.
The kernel is given by
\[
K(\cos\psi,r,R) = \gamma\frac{R(r^2-R^2)}{l^3},
\]and the coefficients in \eqref{eq.kernel} are
\[
k_n = \gamma.
\]
Anomalies
Gravity anomalies in linearized form are defined by
\[
\Delta g = -\frac{\partial T}{\partial r}-\frac{2}{r}T.
\]The Stokes kernel is given by
\[
K(\cos\psi,r,R) = \frac{2R^2}{l}-3\frac{Rl}{r^2}-\frac{R^2}{r^2}\cos\psi
\left(5+3\ln\frac{l+r-R\cos\psi}{2r}\right),
\]and the coefficients in \eqref{eq.kernel} are
\[
k_n = \frac{R}{n-1}.
\]
Disturbance
Gravity disturbances in linearized form are defined by
\[
\delta g = -\frac{dT}{dr}.
\]The Hotine kernel is given by
\[
K(\cos\psi,r,R) = \frac{2R^2}{l}-R\ln\frac{l+R-r\cos\psi}{r(1-\cos\psi)},
\]and the coefficients in \eqref{eq.kernel} are
\[
k_n = \frac{R}{n+1}.
\]
Potential
The Abel-Poisson kernel is given by
\[
K(\cos\psi,r,R) = \frac{R(r^2-R^2)}{l^3},
\]and the coefficients in \eqref{eq.kernel} are
\[
k_n = 1.
\]
Density
This kernel defines a point mass or mass on a single layer ($1/l$-kernel)
taking the effect of the loading into account.
The coefficients of the kernel defined in \eqref{eq.kernel} are
\[
k_n = 4\pi G R\frac{1+k_n'}{2n+1},
\]where $G$ is the gravitational constant and $k_n'$ are the load Love numbers.
Name
Type
Annotation
inputfileLoadingLoveNumber
filename
WaterHeight
Height of equivalent water columns taking the effect of the loading into account.
The coefficients of the kernel defined in \eqref{eq.kernel} are
\[
k_n = 4\pi G \rho R\frac{1+k_n'}{2n+1},
\]where $G$ is the gravitational constant, $\rho$ is the density of water
and $k_n'$ are the load Love numbers.
Name
Type
Annotation
density
double
[kg/m**3]
inputfileLoadingLoveNumber
filename
BottomPressure
Ocean bottom pressure caused by water and atmosphere masses columns taking the effect of the loading into account.
The coefficients of the kernel defined in \eqref{eq.kernel} are
\[
k_n = \frac{4\pi G R }{\gamma}\frac{1+k_n'}{2n+1},
\]where $G$ is the gravitational constant, $\gamma$ is the normal gravity and $k_n'$ are the load Love numbers.
Name
Type
Annotation
inputfileLoadingLoveNumber
filename
Deformation
Computes the radial deformation caused by loading.
The coefficients of the kernel defined in \eqref{eq.kernel} are
\[
k_n = \gamma\frac{1+k_n'}{h_n'},
\]where $\gamma$ is the normal gravity defined in \eqref{normalgravity},
$h_n'$ and $k_n'$ are the load Love numbers and the load deformation Love numbers.
Name
Type
Annotation
inputfileDeformationLoadLoveNumber
filename
inputfilePotentialLoadLoveNumber
filename
if full potential is given and not only loading potential
RadialGradient
This kernel defines the second radial derivative of the (disturbance) potential.
\[
T_{rr} = \frac{\partial^2 T}{\partial r^2}.
\]The coefficients of the kernel defined in \eqref{eq.kernel} are
\[
k_n = \frac{r^2}{(n+1)(n+2)}.
\]
Coefficients
The kernel is defined by the coefficients $k_n$ given by file.
Name
Type
Annotation
inputfileCoefficients
filename
FilterGauss
Another kernel is smoothed by a gauss filter
which is defined by
\[
F(\cos\psi) = \frac{b\cdot e^{-b(1-\cos\psi)}}{1-e^{-2b}}
\]with $b = \frac{\ln(2)}{1-\cos(r/R)}$ where $r$ is the given
smoothing radius in km and $R=6378.1366$ km is the
Earth radius.
The coefficients $k_n$ of the kernel are multiplied by
\[
f_n = \frac{1}{2n+1} \int_{-1}^1 F(t)\cdot \bar{P}_n(t)\,dt.
\]
Another kernel is smoothed by a Blackman low-pass filter. The filter is
defined through the beginning and end of the transition from pass-band to stop-band. This
transition band is specified by startDegreeTransition ($n_1$) and stopDegreeTransition ($n_2$).
The coefficients of this kernel are defined as
\[
\begin{cases}
1 & \text{for } n < n_1 \\
A_n^2 & \text{for } n_1\leq n \leq n_2 \\
0 & \text{for } n > n_2 \\
\end{cases}
\]with
\[
A_n = 0.42 + 0.5\cos(\pi \frac{n-n_1}{n_2-n_1}) + 0.08 \cos(2\pi\frac{n-n_1}{n_2-n_1}).
\]
Another kernel is truncated before minDegree and after maxDegree.
The coefficients of this kernel are defined as
\[
k_n =
\begin{cases}
1 & \text{for } n_{\text{minDegree}} \leq n \leq n_{\text{maxDegree}}\\
0 & \text{else.} \\
\end{cases}
\]
The selenoid height is defined by Bruns formula
\[
N = \frac{1}{\gamma}T
\]with $T$ the disturbance potential and the normal gravity $\gamma=\frac{GM}{R^2}$ of the moon.
The kernel is given by
\[
K(\cos\psi,r,R) = \gamma\frac{R(r^2-R^2)}{l^3},
\]and the coefficients in \eqref{eq.kernel} are
\[
k_n = \gamma.
\]
Generates a sequence with variables to loop over.
The variable names can be set with variableLoop... and
the current values are assigned to the variables for each loop step.
With condition only a subset of loop steps are performed.
The variableLoopIndex and variableLoopCount are not affected by the condition.
The result would therefore be the same as using LoopPrograms with a nested IfPrograms.
Perform the loop in the alphabetically
order defined by the evaluated sortString for each loop step.
So the string must contain loop variables. If sortString
is empty, no sorting will take place.
Example: The sortString={loopTime:%m} of a time series
sorts the times in ascending order by month.
The same principle is used to remove duplicates. If different loop steps
evaluate removeDuplicatesString to the same string,
only the first loop step is executed.
This class provides a matrix used e.g. by MatrixCalculate.
If multiple matrices are given the resulting matrix is the sum of all
and the size is expanded to fit all matrices. Before the computation of each submatrix
the variables rowsBefore and columnsBefore with current size of the overall matrix
are set. As all matrices can be manipulated before, complex matrix operations are possible.
Matrix from a normal equation file. The symmetric normal matrix,
the right hand side vector, the lPl vector, or the observation count $(1\times1)$ can be selected.
Name
Type
Annotation
inputfileNormalEquation
filename
type
choice
normalMatrix
rightHandSide
lPl
observationCount
factor
double
Expression
Matrix filled by an expression. For each element of the new matrix the variables
row and column are set and the expression element is evaluated.
Example: The element=if(row==column,1,0) generates an identity matrix.
Name
Type
Annotation
rows
expression
(variables: rowsBefore, columnsBefore)
columns
expression
(variables: rowsBefore, columnsBefore)
element
expression
for each element of matrix (variables: row, column, rows, columns, rowsBefore, columnsBefore)
Element manipulation
The elements of a matrix are replaced by an expression.
For each element of the matrix the variables data, row, column
are set and the expression element is evaluated and replaces the element.
Additionally the standard data variables are available (assigned each row),
see dataVariables.
for each element of matrix (variables: data, row, column, rows, columns, rowsBefore, columnsBefore)
ElementWiseOperation
Given two matrices $\mathbf{A}$ and $\mathbf{B}$ this class computes $c_{ij} = f(a_{ij}, b_{ij})$,
where $f$ is an expression (for example data0*data1).
For each element of the matrix the variables data0, data1, row, column
are set and the expression element is evaluated.
Computes the eigenvalues of a square matrix and gives a vector of eigenvalues for symmetric matrices
or a matrix with 2 columns with real and imaginary parts in general case.
Extract the diagonal or subdiagonal ($n\times 1$ vector) of a matrix.
The zero diagonal means the main diagonal, a positive value the superdiagonal,
and a negative the subdiagonal.
zero: main diagonal, positive: superdiagonal, negative: subdiagonal
Set type
Set type (matrix, matrixSymmetricUpper, matrixSymmetricLower, matrixTriangularUpper, matrixTriangularLower)
of a matrix. If the type is not matrix, the matrix must be quadratic. Symmetric matrices are filled symmetric
and for triangular matrix the other triangle is set to zero.
This class gives the non conservative forces acting on satellites.
Relativistic effect
The relativistic effect to the acceleration of an artificial Earth satellite
according to IERS2010 conventions.
The macro model and the attitude of the satellite is not needed.
Name
Type
Annotation
beta
double
PPN (parameterized post-Newtonian) parameter
gamma
double
PPN (parameterized post-Newtonian) parameter
J
double
Earth’s angular momentum per unit mass [m**2/s]
GM
double
Geocentric gravitational constant
factor
double
the result is multiplied by this factor
RadiationPressure
This class computes acceleration acting on a satellite caused by Solar and Earth radiation pressure
and thermal radiation.
Solar radiation pressure: The solar constant at 1 AU can be set via solarFlux.
The factorSolarRadation can be used to scale the computed acceleration of the direct solar radiation.
Earth radiation pressure:
Input are a time series of gridded albedo values (unitless) as inputfileAlbedoTimeSeries
and a time series of gridded longwave flux (W/m$^2$) as inputfileLongwaveFluxTimeSeries.
Both files are optional and if not specified, the respective effect on the acceleration is not computed.
The factorEarthRadation can be used to scale the computed acceleration of the earth radiation.
The thermal radiation (TRP) of the satellite itself is either computed as direct re-emission or
based on the actual temperature of the satellite surfaces, depending on the settings of the
satellite macro model. The second one uses a transient temperature model
with a temporal differential equation which disallows parallel computing.
The factorThermalRadiation can be used to scale the computed acceleration of the TRP.
The algorithms are described in:
Woeske et. al. (2019), GRACE accelerometer calibration by high precision non-gravitational force modeling,
Advances in Space Research, https://doi.org/10.1016/j.asr.2018.10.025.
GriddedDataTimeSeries of longwave flux values [W/m^2]
factorSolarRadation
double
Solar radiation pressure is multiplied by this factor
factorEarthRadation
double
Earth radiation pressure is multiplied by this factor
factorThermalRadiation
double
Thermal (re-)radiation is multiplied by this factor
AtmosphericDrag
Atmospheric drag model.
Algorithm for the atmospheric drag modelling is based on the free molecule flow
theory by Sentman 1961. An analytical expression of this treatise is given in
Moe and Moe 2005.
Sentman L. (1961), Free molecule flow theory and its application to the determination
of aerodynamic forces, Technical report.
Moe K., Moe M. M. (2005), Gas-surface interactions and satellite drag coefficients,
Planetary and Space Science 53(8), 793-801, doi:10.1016/j.pss.2005.03.005.
Optional determination steps:
Turn temperature on or off.
In the first case, the model mentioned above is applied, which estimates variable drag
and lift coefficients - in the latter case a constant drag coefficient can be specified.
Turn wind on/off:
It enables the usage of the Horizontal Wind Model 2014 to add additional thermospheric
winds in the calculation process.
compute drag and lift, otherwise simple drag coefficient is used
considerWind
boolean
factor
double
the result is multiplied by this factor
AtmosphericDragFromDensityFile
Atmospheric drag computed from thermospheric density along the orbit
(inputfileDensity, MISCVALUE). The thermosphere
is used to compute temperature and wind.
For further details see atmosphericDrag.
compute drag and lift, otherwise simple drag coefficient is used
considerWind
boolean
factor
double
the result is multiplied by this factor
Antenna thrust
The thrust (acceleration) in the opposite direction the antenna is facing
which is generated by satellite antenna broadcasts.
The thrust is defined in the satellite macro model.
Name
Type
Annotation
factor
double
the result is multiplied by this factor
FromParametrization
Reads a solution vector from file inputfileSolution
which may be computed by a least squares adjustment (e.g. by NormalsSolverVCE).
The coefficients of the vector are interpreted from position indexStart
(counting from zero) with help of parametrization.
If the solution file contains solution of several right hand sides you can choose
one with number rightSide (counting from zero).
This class implements the generation of different types of noise.
It provides a generic interface that can be implemented by different
types of generators. The characteristics of the generated noise
is determined by the generators. See the appropriate documentation
for more information.
White
The noise is Gaussian with a standard deviation sigma.
The noise is computed via a pseudo random sequence with a start value given
by initRandom. The same value always yields the same sequence.
Be careful in parallel mode
as all nodes generate the same pseudo random sequence.
If this value is set to zero a real random value is used as starting value.
Name
Type
Annotation
sigma
double
standard deviation
initRandom
uint
start value for pseudo random sequence, 0: real random
ExpressionPSD
This generator creates noise defined by a one sided PSD.
The psd is an expression controlled by the variable 'freq'.
To determine the frequency sampling must be given.
number of additional epochs before start and after end
overSamplingFactor
uint
noise with multiple higher sampling -> filter -> decimate
PowerLaw
This generator creates noise that conforms to a power law relationship, where the power
of the noise at a frequency is proportional to $1/f^\alpha$, with $\alpha$ typically between -2 and 2.
This class provides a system of normal equations.
This total system is the weighted sum of individual normals.
\[
\M N_{total} = \sum_{k=1} \frac{1}{\sigma_k^2}\M N_k
\qquad\text{and}\qquad
\M n_{total} = \sum_{k=1} \frac{1}{\sigma_k^2} \M n_k.
\]The normals do not need to have the same dimension. The dimension
of the total combined system is chosen to cover all individual systems.
For each normal a startIndex is required which indicates
the position of the first unknown of the individual normal within the
combined parameter vector.
The $\sigma_k$ of the relative weights are defined by aprioriSigma
in a first step. If an apriori solution inputfileApproxSolution is
given or the normals are solved iteratively the weights are determined by means
of variance component estimation (VCE), see NormalsSolverVCE:
\[
\sigma_k^2 =
\frac{\M e_k^T\M P\M e_k}
{n_k-\frac{1}{\sigma_k^2}\text{trace}\left(\M N_k\M N_{total}^{-1}\right)},
\]where $n_k$ is the number of observations. The square sum of the residuals
is calculated by
\[
\M e_k^T\M P\M e_k = \M x^T\M N_k\M x - 2\M n_k^T\M x + \M l_k^T\M P_k\M l_k.
\]The system of normal equations can be solved with several right hand sides at once. But
only one right hand side, which can be selected with the index rightHandSide
(counting from zero), can be used to compute the variance factors.
The combined normal $\M N_{total}$ and the solution $\M x$ are taken from the previous
iteration step. In case of DesignVCE the algorithm
is a little bit different as described below.
Design
This class accumulates normal equations from observation equations.
The class observation computes
the linearized and decorrelated equation system for each arc $i$:
\[
\M l_i = \M A_i \M x + \M B_i \M y_i + \M e_i.
\]The arc depending parameters $\M y_i$ are eliminated and the system of normal
equations is accumulated according to
\[
\M N = \sum_{i=1}^m \M A_i^T \M A_i
\qquad\text{and}\qquad
\M n = \sum_{i=1}^m \M A_i^T \M l_i.
\]
add this normals at index of total matrix (counting from 0)
inputfileArcList
filename
to accelerate computation
DesignVCE
This class accumulates normal equations from observation equations.
The class observation computes
the linearized and decorrelated equation system for each arc $i$:
\[
\M l_i = \M A_i \M x + \M B_i \M y_i + \M e_i.
\]The arc depending parameters $\M y_i$ are eliminated and the system of normal
equations is accumulated according to
\[
\M N = \sum_{i=1} \frac{1}{\sigma_i^2}\M A_i^T \M A_i
\qquad\text{and}\qquad
\M n = \sum_{i=1} \frac{1}{\sigma_i^2} \M A_i^T \M l_i.
\]The variance $\sigma_i^2$ of each individual arc is determined by
\[
\sigma_i^2 =
\frac{(\M l_i-\M A_i\M x)^T(\M l_i-\M A_i\M x)}
{n_i-\frac{1}{\sigma_i^2}\text{trace}\left(\M A_i^T \M A_i\M N_{total}^{-1}\right)},
\]where $n_i$ is the number of observations. If an apriori solution is not given at the first
iteration step a zero vector is assumed.
Vector with the diagonal elements of the weight matrix
inputfileBias
filename
Matrix with right hand sides
aprioriSigma
double
startIndex
uint
regularization of parameters starts at this index (counting from 0)
RegularizationGeneralized
Generalized regularization which is represented by the observation equation
\[
\mathbf{x}_0 = \mathbf{I} \mathbf{x} + \mathbf{v}, \mathbf{v} \sim \mathcal{N}(0, \sum_k \sigma^2_k \mathbf{V}_k).
\]
There are no requirements for partial covariance matrices $\mathbf{V}_k$ except for them being symmetric.
The accumulated covariance matrix $\sum_k \sigma^2_k \mathbf{V}_k$ must be positive definite however.
The variance components $\sigma^2_k$ are estimated during the adjustment process and are assumed to be positive.
All inputfilePartialCovarianceMatrix must be of same size
and must match the dimension of inputfileBiasMatrix
(if provided, otherwise a zero vector of appropriate dimensions is created).
The parameter aprioriSigma determines the initial variance factor for the partial covariance matrices. Either one $\sigma_0$ can be
supplied or one for each $\mathbf{V}_k$.
The regularization matrix can be applied to a subset of parameters by adjusting startIndex.
Name
Type
Annotation
inputfilePartialCovarianceMatrix
filename
symmetric matrix (sum of all matrices must be positive definite)
inputfileBiasMatrix
filename
bias vector (default: zero vector)
aprioriSigma
double
apriori sigmas for initial iteration (default: 1.0)
startIndex
uint
regularization of parameters starts at this index (counting from 0)
This class sets up the observation equations in linearized Gauss-Markov model
\[\label{gmm}
\M l = \M A \M x + \M e\qquad\text{and}\qquad\mathcal{C}(\M e) = \sigma^2\M P^{-1}.
\]The observations are divided into short data blocks which can be computed independently
and so easily can be parallelized. Usually these data blocks are short arcs of a
satellite's orbit. In most cases the unknown parameter vector contains coefficients
of a gravity field parametrization given by parametrizationGravity.
Additional parameters like instrument calibration parameters are appended at the
end of the vector $\M x$.
It is possible to give several observation vectors in one model.
The observations within each arc are decorrelated in the following way:
In a first step a Cholesky decomposition of the covariance matrix is performed
\[
\M P^{-1} = \M W^T\M W,
\]where $\M W$ is an upper regular triangular matrix.
In a second step the transformation
\[\label{dekorrelierung}
\bar{\M A} = \M W^{-T}\M A\qquad\text{and}\qquad \bar{\M l} = \M W^{-T}\M l
\]gives an estimation from decorrelated observations with equal variance
\[\label{normal.GMM}
\bar{\M l} = \bar{\M A} \M x + \bar{\M e}
\qquad\text{and}\qquad
\mathcal{C}(\bar{\M e})= \sigma^2 \M I.
\]Usually the arc dependent parameters are eliminated in the next step
and not mentioned for the parameter names in the following.
PodVariational
The observation equations for precise orbit data (POD) are formulated as variational equations.
It is based on inputfileVariational calculated with PreprocessingVariationalEquation.
Necessary integrations are performed by integrating a moving interpolation polynomial of degree integrationDegree.
The kinematic positions as pseudo observations are taken from
rightHandSide and need not be given equally spaced in time. The observation
equations are interpolated to these times by a moving polynomial of degree interpolationDegree.
The accuracy or the full covariance matrix of the precise orbit data is provided in
covariancePod and can be estimated with PreprocessingPod.
accelerateComputation: In the event that the sampling of the kinematic orbit is much higher than the sampling
of the variational equations (e.g. 1 second vs. 5 seconds) the accumulation of the observation equations
can be accelerated by transforming the observation equations
\[
\M l = \M J \M A \M x + \M e,
\]where $\M J$ describes the interpolation of the sampling of the variational design matrix $\M A$
to the sampling of the observations $\M l$ with more rows than columns. The QR decomposition
\[
\M J = \begin{pmatrix} \M Q_1 & \M Q_2 \end{pmatrix}
\begin{pmatrix} \M R \\ \M 0 \end{pmatrix}.
\]can be used to transform the observation equations
\[
\begin{pmatrix} \M Q_1^T \M l \\ \M Q_2^T \M l \end{pmatrix} =
\begin{pmatrix} \M Q_1^T \M R \\ \M 0 \end{pmatrix} \M A \M x +
\begin{pmatrix} \M Q_1^T \M e \\ \M Q_2^T \M e \end{pmatrix}.
\]As the zero lines should not be considered, the computational time for the accumulation is reduced.
This option is not meaningful for evaluating the residuals such as in PreprocessingPod.
The observation equations for precise orbit data (POD) of short arcs are given by
\[
{\M r}_\epsilon(\tau) = {\M r}_A(1-\tau) + {\M r}_B\tau - T^2\int_0^1 K(\tau,\tau')
\left(\M f_0(\tau')+\nabla V(\tau')\right)\,d\tau'
\]with the integral kernel
\[
K(\tau,\tau') = \begin{cases} \tau'(1-\tau) & \text{for }\tau'\le\tau \\
\tau(1-\tau') & \text{for }\tau'>\tau \end{cases},
\]and the normalized time
\[
\tau = \frac{t-t_A}{T}\qquad\text{with}\qquad T=t_B-t_A.
\]The kinematic positions ${\M r}_\epsilon(\tau)$ as pseudo observations are taken from
rightHandSide. From these positions the influence of the reference forces $\M f_0(\tau)$
is subtracted which are computed with the background models in rightHandSide.
The integral is solved by the integration of a moving interpolation polynomial of degree integrationDegree.
The boundary values ${\M r}_A$ and ${\M r}_B$ (satellite's state vector) are estimated per arc
and are usually directly eliminated if keepSatelliteStates is not set.
The unknown gravity field $\nabla V(\M r, t)$ parametrized by parametrizationGravity
is not evaluated at the observed positions but at the orbit given by inputfileOrbit.
The same is true for the reference forces. The linearized effect of the gravity field change by the position
adjustment is taken into account by gradientfield. This may be a low order field up to a
spherical harmonics degree of $n=2$ or $n=3$.
The inputfileOrbit, inputfileStarCamera, and inputfileAccelerometer
must be synchronous and must be given with a constant sampling and without any gaps in each short arc
(see InstrumentSynchronize).
The kinematic positions ${\M r}_\epsilon(\tau)$ need not be given equally spaced in time
but must be divided into the same arcs as the other instrument data.
The observation equations are interpolated to this time by a polynomial interpolation
with degree interpolationDegree.
The accuracy or the full covariance matrix of the precise orbit data is provided in
covariancePod and can be estimated with PreprocessingPod.
The observation equations for precise orbit data (POD) are given by
\[
\ddot{\M r}(t) - \M g_0(t) = \nabla V(\M r, t),
\]where the accelerations of the satellite $\ddot{\M r}(t)$ are derived from the kinematic positions
in rightHandSide. The orbit differentiation is performed by a moving
polynomial interpolation or approximation with degree interpolationDegree
and number of used epochs numberOfEpochs. The reference forces $\M g_0(t)$ are computed
with the background models in rightHandSide.
The unknown gravity field $\nabla V(\M r, t)$ parametrized by parametrizationGravity
is not evaluated at the observed positions but at the orbit given by inputfileOrbit.
The same is true for the reference forces. This orbit may be a more accurate dynamical orbit but
in most cases the kinematic orbit provides good results.
The accuracy or the full covariance matrix of the precise orbit data is provided in
covariancePod and can be estimated with PreprocessingPod.
The observation equations for precise orbit data (POD) are given by
\[
\frac{1}{2}\dot{\M r}^2
-\dot{\M r} \cdot (\M\Omega\times\M r)
+\int_{t_0}^t(\dot{\M\Omega}\times\M r)\cdot \dot{\M r}\,dt
- \int_{t_0}^t \M g_0 \cdot\dot{\M r}'\,dt
= V + E.
\]where the velocities of the satellite $\dot{\M r}(t)$ are derived from
the kinematic positions in rightHandSide and the Earth's rotation vector $\M\Omega(t)$ is modeled
within earthRotation. The orbit differentiation is
performed by a polynomial interpolation with degree interpolationDegree.
The integrals are solved by a polynomial interpolation with degree integrationDegree.
The reference forces $\M g_0(t)$ are computed with the background models in rightHandSide.
The unknown gravity potential $V(\M r)$ parametrized by parametrizationGravity
is not evaluated at the observed positions but at the orbit given by inputfileOrbit.
The same is true for the reference forces. This orbit may be a more accurate dynamical orbit but
in most cases the kinematic orbit provides good results.
An unknown energy bias $E$ per arc is parametrized by parametrizationBias
and should be a constant in theory but temporal changes might help to absorb other unmodelled effects.
The accuracy or the full covariance matrix of the precise orbit data is provided in
covariancePod and can be estimated with PreprocessingPod.
The following parameters with parameter names are set up:
*:<parametrizationGravity>:*:*.
Like observation:podVariational (see there for details)
but with two satellites and additional satellite-to-satellite (SST) observations.
If multiple inputfileSatelliteTracking are given,
all data are added together. So corrections in extra files like the light time correction
can easily be added. Empirical parameters for the SST observations can be set up with
parametrizationSst.
The accuracy or the full covariance matrix of SST is provided in
covarianceSst.
covariance matrix of kinematic orbits (satellite 2)
SstIntegral
Like observation:podIntegral (see there for details)
but with two satellites and additional satellite-to-satellite (SST) observations.
If multiple inputfileSatelliteTracking are given
all data are added together. So corrections in extra files like the light time correction
can easily be added. Empirical parameters for the SST observations can be set up with
parametrizationSst.
The accuracy or the full covariance matrix of SST is provided in
covarianceSst.
Both SST observation types are reduced by the same background models and the same impact
of accelerometer measurements. The covariance matrix of the reduced observations should not consider
the instrument noise only (covarianceSst1/2) but must
take the cross correlations covarianceAcc into account.
The covariance matrix of the reduced observations is given by
\[
\M\Sigma(\begin{bmatrix} \Delta l_{SST1} \\ \Delta l_{SST2} \end{bmatrix})
= \begin{bmatrix} \M\Sigma_{SST1} + \M\Sigma_{ACC} & \M\Sigma_{ACC} \\
\M\Sigma_{ACC} & \M\Sigma_{SST2} + \M\Sigma_{ACC}
\end{bmatrix}.
\]
The following parameters with parameter names are set up:
different accuracies for each arc (multiplied by sigma)
inputfileCovarianceFunction
filename
covariance function in time
Terrestrial
The gravity field is estimated from point wise measurements.
The gravity field parametrization is given by parametrizationGravity.
There is no need to have the data regular distributed or given on a sphere or ellipsoid.
The type of the gridded data (e.g. gravity anomalies or geoid heights)
must be set with kernel.
A referencefield can be reduced beforehand.
The observations at given positions are calculated from
inputfileGriddedData.
The input columns are enumerated by data0, data1, … ,
see dataVariables.
The observations can be divided into small blocks for parallelization.
With blockingSize set the maximum count of observations in each block.
The following parameters with parameter names are set up:
*:<parametrizationGravity>:*:*.
segmentation of the observations if the design matrix can't be built at once
Deflections
The gravity field parametrized by parametrizationGravity
is estimated from deflections of the vertical measurements.
A referencefield can be reduced beforehand.
The observations $\xi$ in north direction and $\eta$ in east direction
at given positions are calculated from
inputfileGriddedData.
The input columns are enumerated by data0, data1, … ,
see dataVariables.
The ellipsoid parameters R and inverseFlattening are used
to define the local normal direction.
The observations can be divided into small blocks for parallelization.
With blockingSize set the maximum count of observations in each block.
The following parameters with parameter names are set up:
*:<parametrizationGravity>:*:*.
segmentation of the observations if the design matrix can't be built at once
StationLoading
Observation equations for displacements of a list of stations
due to the effect of time variable loading masses. The displacement $\M u$ of a station is calculated according to
\[
\M u(\M r) = \frac{1}{\gamma}\sum_{n=0}^\infty \left[\frac{h_n}{1+k_n}V_n(\M r)\,\M e_{up}
+ R\frac{l_n}{1+k_n}\left(
\frac{\partial V_n(\M r)}{\partial \M e_{north}}\M e_{north}
+\frac{\partial V_n(\M r)}{\partial \M e_{east}} \M e_{east}\right)\right],
\]where $\gamma$ is the normal gravity, the load Love and Shida numbers $h_n,l_n$ are given by
inputfileDeformationLoadLoveNumber and the load Love numbers $k_n$ are given by
inputfilePotentialLoadLoveNumber.
The $V_n$ are the spherical harmonics expansion of degree $n$ of the full time variable
gravitational potential (potential of the loading mass + deformation potential)
parametrized by parametrizationGravity.
Additional parameters can be setup to estimate the realization of the reference frame
of the station coordinates (estimateTranslation,
estimateRotation, and estimateScale).
The observations at stations coordinates are calculated from
inputfileGriddedData.
The input columns are enumerated by data0, data1, … ,
see dataVariables.
The ellipsoid parameters R and inverseFlattening are used
to define the local frame (north, east, up).
Reference:
Rietbroek (2014): Retrieval of Sea Level and Surface Loading Variations from Geodetic Observations
and Model Simulations: an Integrated Approach, Bonn, 2014. - Dissertation,
https://nbn-resolving.org/urn:nbn:de:hbz:5n-35460
Name
Type
Annotation
rightHandSide
sequence
input for observation vectors
inputfileGriddedData
filename
station positions with displacement data
observationNorth
expression
displacement [m]
observationEast
expression
displacement [m]
observationUp
expression
displacement [m]
sigmaNorth
expression
accuracy, 1/sigma used as weighting
sigmaEast
expression
accuracy, 1/sigma used as weighting
sigmaUp
expression
accuracy, 1/sigma used as weighting
inGlobalFrame
boolean
obs/sigmas given in global x,y,z frame instead of north,east,up
Implements the propagation of a satellite orbit under
the influence of forces as
used in SimulateOrbit
(dynamic orbits from numerical orbit integration).
Euler
This class implements Euler's method to propagate a satellite orbit under the influence of Forces.
Satellite is assumed to be oriented along-track.
RungeKutta4
This class implements the classical Runge-Kutta 4 method of orbit propagation
for satellite orbit under the influence of Forces.
No step-width control or other advanced features are implemented.
Satellite is assumed to be oriented along-track.
See: Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits
AdamsBashforthMoulton
This class implements the Adams-Moulton class of predictor-corrector orbit propagators
for a satellite orbit under the influence of Forces using an implicit
Adams-Bashforth corrector. The coefficients for the propagator are derived using the equations
given in section 4.2.3 of [1]. Satellite is assumed to be oriented along-track.
[1] Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits
This class implements the Stoermer-Cowell class of predictor-corrector orbit propagators for a satellite orbit
under the influence of Forces. The coefficients for the Stoermer predictor and Cowell corrector
are derived using the equations given in section 4.2.6 of [1]. Stoermer-Cowell is a double integration algorithm,
yielding positions directly from accelerations. It does not produce velocities. The velocities are derived using
Adams-type propagators as suggested in [2]. Satellite is assumed to be oriented along-track.
[1] Montenbruck, Oliver, and Eberhard Gill. 2000. Satellite Orbits
[2] Berry, Matthew M., and Liam M. Healy. 2004. “Implementation of Gauss-Jackson Integration for Orbit Propagation.”
This class implements the Gauss-Jackson multi-step predictor-corrector method to
propagate a satellite orbit under the influence of Forces.
Satellite is assumed to be oriented along-track. Implementation is based on [1].
[1] Berry, Matthew M., and Liam M. Healy. 2004. “Implementation of Gauss-Jackson Integration for Orbit Propagation.”
Maximum number of iterations to run the corrector step for.
epsilon
double
Convergence criteria for position, velocity, and acceleration tests.
Polynomial
This class implements an integration Polynomial method to propagate a satellite orbit under
the influence of Forces. Satellite is assumed to be oriented along-track.
Implementation is based on code by Torsten Mayer-Gürr.
Name
Type
Annotation
degree
uint
polynomial degree to integrate accelerations
shift
int
shift polynomial in future (predicted accelerations)
Parameter names of satellite acceleration parametrization.
Arc related parameters are appended if an inputfileInstrument is provided which
defines the arc structure.
An additional object name can be included in the parameter names.
Name
Type
Annotation
object
string
object these parameters refers to, e.g. graceA, G023
Parameter names of GNSS antenna center variation parametrization.
An additional object name (antenna name) can be included in the parameter names.
It is possible to setup the parameters for each gnssType.
This class provides an index vector from selected parameters,
which can be used e.g. to reorder a normal equation matrix.
The size of the index vector determines the size of the new matrix.
Entries are the indices of the selected parameters in the provided
parameter list or NULLINDEX for zero/new parameters.
Wildcard
Parameter index vector from name. Name matching supports wildcards * for any number of characters and ? for exactly one character.
Does not add zero/empty parameters if there are no matches.
Name
Type
Annotation
object
string
object this parameter refers to, e.g. graceA, G023, earth (wildcards: * and ?)
type
string
type of this parameter, e.g. accBias, position.x (wildcards: * and ?)
temporal
string
temporal representation of this parameter, e.g. trend, polynomial.degree1 (wildcards: * and ?)
interval
string
interval/epoch this parameter refers to, e.g. 2017-01-01_00-00-00_2017-01-02_00-00-00, 2008-01-01_00-00-00 (wildcards: * and ?)
Names
Parameter index vector from list of parameter names.
This class defines parameters of satellite accelerations.
It will be used to set up the design matrix in a least squares adjustment.
If multiple parametrizations are given the coefficients in the parameter vector
are sequentially appended.
PerRevolution
Oscillation once, twice, ... per revolution in Satellite Reference Frame (SRF)
with the argument of latitude as input angle. If the attitude of the satellite
is not provided the Celestial Reference Frame (CRF) is used instead.
Parameters are estimated in $[nm/s^2=10^{-9}\,m/s^2]$.
Temporal changing accelerometer bias per axis in $[m/s^2]$ in Satellite Reference Frame (SRF).
If the attitude of the satellite is not provided the Celestial Reference Frame (CRF) is used instead.
Estimate the thermospheric density along the orbit using a satellite macro model.
An optional thermospheric model can be used to compute temperature and wind.
The temperature is used to estimate variable drag and lift coefficients, otherwise a constant drag coefficient is used.
The density is estimated in $[kg/m^3]$.
Parametrization of antenna center variations. It will be used to set up the design matrix in a least squares adjustment.
Usually the parametrization is setup separately for different gnssType.
If multiple parametrizations are given the parameters are sequentially appended in the design matrix and parameter vector.
Center
Antenna center or, if setup for a specific gnssType,
phase/code center offset (e.g. *1*G for GPS L1 phase center offset) in $[m]$.
Parametrization of antenna center variations in $[m]$ in terms of spherical harmonics.
As usually only data above the horizon are observed only the even spherical harmonics
(degree/order $m+n$ even), which are symmetric to the equator, are set up.
The total count of parameters is $((n_{max}+1)(n_{max}+2)-n_{min}(n_{min}+1))/2$ and
the parameter names are
Parametrization of antenna center variations with radial basis functions
\[
ACV(\M x(A, E)) = \sum_i a_i \Phi(\M x\cdot\M x_i)
\]where $a_i$ in $[m]$ are the coefficients which have to be estimated and $\Phi$ are the basis
functions
\[
\Phi(\cos\psi) = \sum_n \sqrt{2n+1}P_n(\cos\psi).
\]
The parameter names are
*:antennaCenterVariations.radialBasis.<index>.<total count>:*:*.
Figure: Nodal points of the basis functions
using a Reuter grid for transmitting satellites (view angle of 18 deg). The red line indicates the view angle of 14 deg of ground stations.
This class gives a parametrization of the time depending gravity field.
Together with the class observation it will be used
to set up the design matrix in a least squares adjustment.
If multiple parametrizations are given the coefficients in the parameter vector
are sequentially appended.
SphericalHarmonics
The potential $V$ is parametrized by an expansion of (fully normalized) spherical harmonics
\[
V(\lambda,\vartheta,r) = \frac{GM}{R}\sum_{n=0}^\infty \sum_{m=0}^n \left(\frac{R}{r}\right)^{n+1}
\left(c_{nm} C_{nm}(\lambda,\vartheta) + s_{nm} S_{nm}(\lambda,\vartheta)\right).
\]You can set the range of degree $n$ with minDegree and maxDegree.
The sorting sequence of the potential coefficients in the parameter vector can be defined by
numbering.
The total count of parameters is $(n_{max}+1)^2-n_{min}^2$ and
the parameter names are
The potential $V$ is represented by a sum of space localizing basis functions
\[
V(\M x) = \sum_i a_i \Phi(\M x, \M x_i)
\]where $a_i$ are the coefficients which have to be estimated and $\Phi$ are the basis
functions given by isotropic radial kernel functions
\[
\Phi(\cos\psi,r,R) = \sum_n \left(\frac{R}{r}\right)^{n+1} k_n\sqrt{2n+1}\bar{P}_n(\cos\psi).
\]The basis functions are located on a grid $\M x_i$ given by grid.
This class can also be used to estimate point masses if kernel is set to density.
The parameter names are *:radialBasis.<index>.<total count>:*:*.
The time variable potential is given by
\[
V(\M x,t) = \sum_i V_i(\M x)\Psi_i(t),
\]where $V_i(\M x)$ is the spatial parametrization of the gravity field
and can be chosen with parametrizationGravity.
The parametrization in time domain $\Psi_i(t)$ is selected by
parametrizationTemporal.
The total parameter count is the parameter count of parametrizationTemporal
times the parameter count of parametrizationGravity.
Parametrization of the gravity field on the basis of a linear transformation of a source parametrization.
The linear transformation changes the original solution space represented by
parametrizationGravitySource from
\[
\mathbf{l} = \mathbf{A}\mathbf{x} + \mathbf{e}
\]to
\[
\mathbf{l} = \mathbf{A}\mathbf{F}\mathbf{y} + \mathbf{e}
\]through the linear transformation $\mathbf{x}=\mathbf{F}\mathbf{y}$.
It follows that the number of rows of the matrix $\mathbf{F}$ in inputfileTransformationMatrix coincides with
the number of parameters in parametrizationGravitySource.
The new parameter count is given by the number of columns in $\mathbf{F}$ and may be smaller, equal or larger
than the original parameter count.
The parameter names are *:transformedParameter.<index>.<total count>:*:*.
transformation matrix from target to source parametrization (rows of this matrix must coincide with the parameter count of the source parametrization)
EarthquakeOscillation
This class is used to estimate the earthquake oscillation function parameters,
i.e. $C_{nlm}$, $\omega_{nlm}$, and $P_{nlm}$.
The results describe the variation in the gravitational potential field caused by large earthquakes.
\[
C_{lm}(\M t) = \sum_{n=0}^NC_{nlm}(1-\cos(\omega_{nlm}d\M t)\exp(P_{nlm}\omega_{nlm}d\M t)),
\]with $\omega_{nlm}=\frac{2\pi}{T_{nlm}}$ and $P_{nlm}=\frac{-1}{2Q_{nlm}}$ . In this equation, $Q_{nlm}$ is the attenuation factor,
$n$ is the overtone factor, $l$ is the degree, $m$ is the order, and $t$ is the time after the earthquake in seconds.
This class defines parameters of Satellite-to-Satellite tracking observations.
It will be used to set up the design matrix in a least squares adjustment.
If multiple parametrizations are given the coefficients in the parameter vector
are sequentially appended.
AntennaCenter
Estimate the KBR antenna phase centre (APC) coordinates in $[m]$ for each spacecraft in satellite reference frame (SRF)
as constant per axis. The observation equations are computed by taking the derivative
of the antenna offset correction equation w.r.t. the KBR APC coordinates.
Estimate time shift in seconds in SST observations, with defined temporal variation
by parametrizationTemporal.
The design matrix is computed by taking the derivative of the ranging data w.r.t. time.
The parameter names are satellite1.satellite2:sstTimeBias:<temporal>:<interval>.
Estimate deterministic signals in the GRACE K-Band measurements caused by Sun intrusions
into the star camera baffles of GRACE-A and eclipse transits of the satellites.
These events can be time-indexed beforehand using satellite position and orientation,
see GraceSstSpecialEvents. The shape of this short-period waveform is nearly
constant within one month and can be approximated by a polynomial.
The amplitude variation of the waveform can also be taken into account
by parametrizationTemporal.
The parameter names are satellite1.satellite2:<type>.legendrePolynomial.n<degree>:<temporal>:<interval>.
This class gives a parametrization of time depending parameters (gravity field, positions, ...).
It will be used to set up the design matrix in a least squares adjustment.
If multiple parametrizations are given the coefficients in the parameter vector
are sequentially appended.
Usually time intervals are defined half-open, meaning the last time does not belong to the interval.
This behaviour can be changed for the last interval with includeLastTime.
Constant
Represents a parameter being constant in time in each interval.
A time variable function is given by a linear trend
\[
f(x,t) = \frac{1}{T}(t-t_0) \cdot f_t(x),
\]with $t_0$ is timeStart and $T$ is timeStep in days.
A constant term is not included and must be added separately.
The parameter name is *:*:trend.<timeStep(days)>*(t-<timeStart>):*.
Name
Type
Annotation
timeStart
time
reference time
timeStep
time
Splines
A time variable function is given by
\[
f(x,t) = \sum_i f_i(x)\Psi_i(t),
\]with the (spatial) coefficients $f_i(x)$ as parameters and the temporal basis functions $\Psi_i(t)$.
Basis splines are defined as polynomials of degree $n$ in intervals between nodal points in time $t_i$,
for details see basis splines.
The parameters are ordered timewise. First all parameters of $f_{i=1}(x)$ then
$f_{i=2}(x)$ and so on. The total parameter count in each interval is $N=N_t+n-1$,
where $N_t$ is the count of time points from timeSeries in each interval and $n$
is the degree.
The parameter names are *:*:spline.n<degree>:<interval of each spline>.
A time variable function is represented by Legendre polynomials in each interval.
The time is normed to $[-1,1)$ in each interval.
The total parameter count is $(N+1)M$,
where $N$ is the polynomial degree and $M$ the number of intervals with
the parameter names *:*:legendrePolynomial.n<degree>:<interval>.
A time variable function is given by an oscillation
\[
f(x,t) = f^c(\M x)\cos(\omega_i(t)) + f^s(\M x)\sin(\omega_i(t))
\]with $\omega_i=\frac{2\pi}{T_i}(t-t_0)$,
$t_0$ is timeStart and $T_i$ is timePeriod in days.
The parameter names are *:*:oscillation.cos(2*pi/<period(days)>*(t-<timeStart>)):*
and *:*:oscillation.sin(2*pi/<period(days)>*(t-<timeStart>)):*.
Name
Type
Annotation
period
time
[day]
time0
time
reference time
Fourier
A time variable function is given by a fourier expansion
\[
f(x,t) = \sum_{m=1}^M f_m^c(\M x)\cos(2\pi m \tau) + f_m^s(\M x)\sin(2\pi m \tau)
\]with the normalized time
\[
\tau = \frac{t-t_A}{t_B-t_A},
\]and $t_A$ is timeStart, $t_B$ is timeEnd in each interval
and $M$ is the fourierDegree.
The total parameter count is $2MN$, where $N$ is the number of intervals.
The parameters are sorted in following order: $f_1^c, f_1^s, f_2^c, \ldots$ with
the parameter names *:*:fourier.cos(<m>*x):<interval> and *:*:fourier.sin(<m>*x):<interval>.
The time variable function is given by a fourier expansion
\[
f(x,t) = \sum_{i} f_i^c(x)\cos(\Theta_i(t)) + f_i^s(x)\sin(\Theta_i(t)),
\]where $\Theta_i(t)$ are the arguments of the tide constituents $i$
\[
\Theta_i(t) = \sum_{k=1}^6 n_i^k\beta_k(t),
\]where $\beta_k(t)$ are the Doodson's fundamental arguments ($\tau,s,h,p,N',p_s$) and $n_i^k$
are the Doodson multipliers for the term at frequency $i$.
The multipliers must be given by doodson coded as Doodson number
(e.g. 255.555) or as names introduced by Darwin (e.g. M2).
The total parameter count is $2N$ with $N$ the number of doodson frequencies.
The parameters are sorted in following order: $f_1^c, f_1^s, f_2^c, \ldots$ with
the parameter names *:*:doodson.cos(<doodsonName>):* and *:*:doodson.sin(<doodsonName>):*.
Select a list of platforms (stations, satellites, ...).
In a first step, all platforms are selected if the first selector excludes platforms,
otherwise all platforms are excluded. Then every selector from top to bottom selects or deselects
(with exclude) the matching platforms.
Select all receivers/transmitters which match the
name, markerName, and markerNumber.
Name
Type
Annotation
name
string
wildcards: * and ?
markerName
string
wildcards: * and ?, from platform
markerNumber
string
wildcards: * and ?, from platform
exclude
boolean
deselect matching platforms
File
Select receivers/transmitters from each row of
inputfileStringTable.
Additional columns in a row represent alternatives
if previous names are not available (e.g. without observation file).
Name
Type
Annotation
inputfileStringTable
filename
list of names with alternatives
exclude
boolean
deselect first matching platforms
Equipment
Select all platforms which have the specified equipment in the processed time interval.
Name
Type
Annotation
name
string
wildcards: * and ?
serial
string
wildcards: * and ?
equipmentType
choice
equipment type
all
all types
gnssAntenna
sequence
antennas
radome
string
wildcards: * and ?
gnssReceiver
sequence
receivers
version
string
wildcards: * and ?
slrStation
SLR station
slrRetroReflector
laser retroreflector
satelliteIdentifier
sequence
satellite identifier
cospar
string
wildcards: * and ?
norad
string
wildcards: * and ?
sic
string
wildcards: * and ?
sp3
string
wildcards: * and ?
other
other types
exclude
boolean
deselect matching platforms
Exclude
Deselects all selected receivers/transmitters of
selector.
If set to 'yes', the directions right/up are changed to left/down.
Time
The input data are interpreted as MJD (modified Julian date).
The unit of the tick spacings should be appended to the number and can be any of
Y (year, plot with 4 digits)
y (year, plot with 2 digits)
O (month, plot using FORMAT_DATE_MAP)
o (month, plot with 2 digits)
U (ISO week, plot using FORMAT_DATE_MAP)
u (ISO week, plot using 2 digits)
r (Gregorian week, 7-day stride from start of week TIME_WEEK_START)
K (ISO weekday, plot name of day)
D (date, plot using FORMAT_DATE_MAP)
d (day, plot day of month 0-31 or year 1-366, via FORMAT_DATE_MAP)
R (day, same as d, aligned with TIME_WEEK_START)
H (hour, plot using FORMAT_CLOCK_MAP)
h (hour, plot with 2 digits)
M (minute, plot using FORMAT_CLOCK_MAP)
m (minute, plot with 2 digits)
S (second, plot using FORMAT_CLOCK_MAP)
s (second, plot with 2 digits).
A secondary time axis can be added to specify larger intervals (e.g. dates of hourly data).
Examples: Settings for Fig. plotAxisType:plotAxisTime1: majorTickSpacing=6H, secondary: majorTickSpacing=1D.
Figure: Time axis for daily data.
Settings for Fig. plotAxisType:plotAxisTime2: majorTickSpacing=2d, secondary: majorTickSpacing=1O, options=FORMAT_DATE_MAP="o yyyy".
Figure: Time axis for monthly data.
Settings for Fig. plotAxisType:plotAxisTime3: majorTickSpacing=1o, secondary: majorTickSpacing=1Y, options=FORMAT_DATE_MAP="mm".
Figure: Time axis for yearly data.
Name
Type
Annotation
min
time
The minimum value of the time axis. If no value is given, the minimum scale value is set automatically.
max
time
The maximum value of the time axis. If no value is given, the maximum scale value is set automatically.
Defines the content of an xy-plot of PlotGraph.
Multiple layers are plotted sequentially. With plotOnSecondAxis
the alternative y-axis on the right hand side can be selected if provided.
LinesAndPoints
Draws a line and/or points (symbol)
of xy data. The standard dataVariables
are available to select the data columns of inputfileMatrix.
If no color of the symbol
is given a colorbar
is required and the color is determined by valueZ.
Additionally a vertical error bar can be plotted at each data point with
size valueErrorBar.
Draws a symmetrical envelope around valueY as function of valueX
using deviations valueErrors.
The standard dataVariables
are available to select the data columns of inputfileMatrix.
The data line itself is not plotted but must be added as extra
layer:linesAndPoints.
Name
Type
Annotation
inputfileMatrix
filename
each line contains x,y
valueX
expression
expression for x-values (input columns are named data0, data1, ...)
valueY
expression
expression for y-values (input columns are named data0, data1, ...)
Creates a bar plot with vertical or horizontal bars out of the given
x- and y-values. The standard dataVariables
are available to select the data columns of inputfileMatrix.
The bars range from valueBase (which can also be an expression) to the valueY.
If no color is given a colorbar
is required and the color is determined by valueZ.
Creates a regular grid of yxz values. The standard dataVariables
are available to select the data columns of inputfileMatrix.
Empty grid cells are not plotted. Cells with more than one value will be set to the mean value.
The grid spacing is determined by the median spacing of the input data or set by incrementX/Y.
Plot degree amplitudes from a gravityfield.
The coefficients can be converted to different functionals with kernel.
If set the expansion is limited in the range between minDegree
and maxDegree inclusively. It plots a solid line for the degree amplitude (signal)
and a dotted line for the formal errors per default.
Defines the content of a map of PlotMap. Multiple layers are plotted sequentially.
GriddedData
Creates a regular grid of xyz values. The standard dataVariables
are available to select the data column of inputfileGriddedData.
Empty grid cells are not plotted. Cells with more than one value will be set to the mean value.
The grid spacing can be determined automatically for regular rectangular grids otherwise
it must be set with increment. To get a better display together with some projections
the grid should be internally resampled to higher resolution.
It is assumed that the points of inputfileGriddedData represent centers of grid cells.
This assumption can be changed with gridlineRegistered (e.g. if the data starts at the north pole).
Name
Type
Annotation
inputfileGriddedData
filename
value
expression
expression to compute values (input columns are named data0, data1, ...)
increment
angle
the grid spacing [degrees]
illuminate
boolean
illuminate grid
resample
sequence
intermediateDpi
double
oversample grid for a smoother visual effect
interpolationMethod
choice
interpolation method for oversampling
bspline
B-Spline interpolation
bicubic
bicubic interpolation
bilinear
bilinear interpolation
nearest
nearest neighbour interpolation
threshold
double
A threshold of 1.0 requires all (4 or 16) nodes involved in interpolation to be non-NaN. 0.5 will interpolate about half way from a non-NaN value; 0.1 will go about 90% of the way.
gridlineRegistered
boolean
treat input as point values instead of cell means
Points
Draws points (symbol) and/or lines
between the points. If no color of the symbol
is given a colorbar is required and the color is determined
by the value expression. The standard dataVariables
are available to select the data column of inputfileGriddedData.
Name
Type
Annotation
inputfileGriddedData
filename
value
expression
expression to compute color (input columns are named data0, data1, ...)
draw connecting lines as great circles (otherwise, a straight line is drawn instead)
Arrows
Draws an arrow for each point in inputfileGriddedData.
The arrows are defined by the expressions valueNorth/East.
The standard dataVariables
are available to select the corresponding data columns of inputfileGriddedData.
The scale factor converts the input units to cm in the plot.
If no color is given a colorbar is required
and the color is determined by the value expression.
With scaleArrow a reference arrow as legend can be plotted inside or outside the map.
Name
Type
Annotation
inputfileGriddedData
filename
grid file with north and east values for arrows
valueNorth
expression
expression to compute north values (input columns are named data0, data1, ...)
valueEast
expression
expression to compute east values (input columns are named data0, data1, ...)
value
expression
expression to compute arrow color (input columns are named data0, data1, ...)
scale
double
[cm per input unit] length scale factor
penSize
double
[pt] width of arrow shaft
headSize
double
[pt] size of arrow head, 0: no head, negative: reverse head
An image of the Earth's surface as seen from outer space -
the image is known as blue marble. The directory of inputfileChannels
contains several files in different resolutions representing the Earth's surface each
month throughout a year.
Figure: The blue marble.
Name
Type
Annotation
inputfileImage
filename
Blue Marble image file
brightness
double
brightness of bitmap [-1, 1]
illuminate
sequence
add hillshade based on topography
inputfileTopography
filename
GMT grid file containing topography.
azimuth
angle
direction of lighting source [deg]
elevation
angle
direction of lighting source [deg]
ambient
double
ambient lighting
diffuse
double
diffuse lighting
specular
double
specular reflection
shine
double
surface shine
amplitude
double
scale gradient by factor
Text
Writes a text at originLongitude and originLatitude position in the map.
With clip the text is cut at the boundaries of the plotting area.
Name
Type
Annotation
text
string
originLongitude
angle
[deg]
originLatitude
angle
[deg]
offsetX
double
[cm] x-offset from origin
offsetY
double
[cm] y-offset from origin
alignment
string
L, C, R (left, center, right) and T, M, B (top, middle, bottom)
The Robinson projection, presented by Arthur H. Robinson in 1963,
is a modified cylindrical projection that is neither conformal nor equal-area.
Central meridian and all parallels are straight lines; other meridians are curved.
It uses lookup tables rather than analytic expressions to make the world map look right.
Name
Type
Annotation
centralMeridian
angle
central meridian [degree]
Orthographic
The orthographic azimuthal projection is a perspective projection from infinite distance.
It is therefore often used to give the appearance of a globe viewed from space.
Name
Type
Annotation
lambdaCenter
angle
central point [degree]
phiCenter
angle
central point [degree]
Perspective sphere
The orthographic azimuthal projection is a perspective projection from infinite distance.
It is therefore often used to give the appearance of a globe viewed from space.
Name
Type
Annotation
lambdaCenter
angle
longitude of central point in degrees
phiCenter
angle
latitude of central point in degrees
altitude
double
[km]
azimuth
angle
to the east of north of view [degrees]
tilt
angle
upward tilt of the plane of projection, if negative, then the view is centered on the horizon [degrees]
viewpointTwist
angle
clockwise twist of the viewpoint [degrees]
viewpointWidth
angle
width of the viewpoint [degrees]
viewpointHeight
angle
height of the viewpoint [degrees]
Polar
Stereographic projection around given central point.
A particular subset of the transverse Mercator is the Universal Transverse Mercator (UTM)
which was adopted by the US Army for large-scale military maps.
Here, the globe is divided into 60 zones between 84$^{o}$S and 84$^{o}$N, most of which are 6$^{o}$ wide.
Each of these UTM zones have their unique central meridian.
Name
Type
Annotation
zone
string
UTM zone code (e.g. 33N)
Lambert
This conic projection was designed by Lambert (1772) and has been used extensively for mapping of regions with predominantly east-west orientation.
Name
Type
Annotation
lambda0
angle
longitude of projection center [deg]
phi0
angle
latitude of projection center [deg]
phi1
angle
latitude of first standard parallel [deg]
phi2
angle
latitude of second standard parallel [deg]
Linear
Linear mapping of longitude/latitude to x/y (Plate Caree).
Mollweide
This pseudo-cylindrical, equal-area projection was developed by Mollweide in 1805. Parallels are unequally spaced straight
lines with the meridians being equally spaced elliptical arcs. The scale is only true along latitudes 40$^{o}$44' north and south.
The projection is used mainly for global maps showing data distributions.
Observation vector for precise orbit data (POD) of observation
equations in a least squares adjustment. The observations are reduced by the effect of
inputfileAccelerometer and forces
(observed minus computed).
Name
Type
Annotation
podRightSideType
sequence
inputfileOrbit
filename
kinematic positions of satellite as observations
inputfileAccelerometer
filename
non-gravitational forces in satellite reference frame
This reference manual details programs included in GROOPS, describing what they are and what they do.
For usage examples see the cookbook in the documentation overview.
Observation vector for gradiometer data (satellite gravity gradiometry, SGG)
of observation equations in a least squares adjustment.
The observations are reduced by an inputfileReferenceGradiometer,
the effect of referencefield, and tides
(observed minus computed).
This class defines the models and parameters of the linearized observation equations
for normal points (see SlrProcessing)
\[\label{slrParametrizationType:model}
\M l - \M f(\M x_0) = \left.\frac{\partial \M f(\M x)}{\partial \M x}\right|_{\M x_0} \Delta\M x + \M\epsilon,
\]where the left side is the observation vector minus the effects computed from the a priori models.
After each least squares adjustment
(see SlrProcessing:processingStep:estimate)
the a priori parameters are updated
\[\label{slrParametrizationType:update}
\M x_0 := \M x_0 + \Delta\hat{\M x}.
\]The vector $\M x_0$ can be written with
SlrProcessing:processingStep:writeAprioriSolution.
Any outputfiles defined in the parametrizations are written with
SlrProcessing:processingStep:writeResults.
Each parametrization (and possible constraint equations) has a name which enables
activating/deactivating the estimation of subsets of $\Delta\M x$ with
SlrProcessing:processingStep:selectParametrizations.
The a priori model $\M f(\M x_0)$ is unaffected and is always reduced.
The model for the one way range observations between station $s$ and reflector $r$
can be described as
\[\label{slrParametrizationType:slrFullModel}
\begin{split}
f_s^r(\M x) &= \frac{1}{2}\left(\left\lVert \M r^r(t_{bounce})-\M r_s(t_{trans}) \right\rVert
+ \left\lVert \M r_s(t_{recv})-\M r^r(t_{bounce}) \right\rVert\right) \\
&+ \text{troposphere}(t,\M r_{ss}^r)
+ \text{bias}^r + \text{bias}_s + \text{bias}_s^r + \text{other}(\ldots) + \epsilon_r^s
\end{split}
\]
See also SlrProcessing.
Troposphere
A priori tropospheric correction is handled by a troposphere model (e.g. Mendes and Pavlis).
Additional parameters in $[m]$ for zenith delay can be set up via
troposphereEstimation.
These parameters can be soft-constrained using
parametrization:constraints
to avoid an unsolvable system of normal equations in case of data gaps.
The parameter names are <station>:troposphere:<temporal>:<interval>.
Estimates a static position for all
selectReceivers in the terrestrial frame.
No-net constraints can be applied for a subset of stations,
selectNoNetReceivers, with a
standard deviation of noNetTranslationSigma, noNetRotationSigma, and noNetScaleSigma.
If the template inputfileNoNetPositions is provided
the constraints are applied relatively to these positions. Only stations with an existing position file
are considered. Without inputfileNoNetPositions
the constraints are applied towards the apriori values from
SlrProcessing:station.
As a single corrupted station position can disturb the no-net conditions,
the rotation/translation parameters are estimated in a
robust least squares adjustment
beforehand. The computed weight matrix is used to downweight corrupted stations
in the constraint equations.
In case you want to align to an ITRF/ILRS reference frame, precise coordinates can be
generated with Sinex2StationPositions.
Constraints on the defined parameters can be added via
parametrization:constraints.
An example would be to set up estimateUT1:constant
so the $dUT1$ parameter is included in the normal equation system. Since $dUT1$ cannot be
determined by SLR, a hard constraint to its a priori value can then be added.
Add a pseudo observation equation (constraint)
for each selected parameter
\[
b-x_0 = 1 \cdot dx + \epsilon,
\]where $b$ is the bias and $x_0$ is the a priori value of the parameter
if relativeToApriori is not set.
The standard deviation sigma is used to weight the observation equations.
Processing steps enable a dynamic definition of the consecutive steps performed during any kind of SLR processing.
The most common steps are estimate, which performs an iterative least
squares adjustment, and writeResults, which writes all output files
defined in SlrProcessing and is usually the last step.
Some steps such as selectParametrizations
and selectStations affect all subsequent steps.
In case these steps are used within a group step,
they only affect the steps within this level.
Estimate
Iterative non-linear least squares adjustment.
In every iteration it accumulates the system of normal equations, solves the system and updates the estimated parameters.
The estimated parameters serve as a priori values in the next iteration and the following processing steps.
Iterates until either every single parameter update (converted to an influence in meter)
is below a convergenceThreshold or maxIterationCount is reached.
With computeResiduals the observation equations are computed
again after each update to compute the observation residuals.
The overall standard deviation of a single observation used for the weighting
is composed of several factors
\[
\hat{\sigma}_i = \hat{\sigma}_i^{huber} \hat{\sigma}^{stat} \sigma_{apriori}^{stat},
\]where the $\sigma_{apriori}^{stat}$ is given by station:accuracy.
The other factors are estimated iteratively from the residuals.
With computeWeights a standardized variance $\hat{s}_i^2$
for each residual $\hat{\epsilon}_i$ is computed
\[
\hat{s}_i^2 = \frac{1}{\hat{\sigma}^{stat} \sigma_{apriori}^{stat}}\frac{\hat{\epsilon}_i^2}{r_i}
\qquad\text{with}\qquad
r_i = \left(\M A\left(\M A^T\M A\right)^{-1}\M A^T\right)_{ii}
\]taking the redundancy $r_i$ into account. If $\hat{s}_i$ is above a threshold huber
the observation gets a higher standard deviation used for weighting according to
\[
\hat{\sigma}_i^{huber} =
\left\{ \begin{array}{ll}
1 & s < huber,\\
(\hat{s}_i/huber)^{huberPower} & s \ge huber
\end{array} \right.,
\]similar to robust least squares adjustment.
With adjustSigma0 an individual variance factor can be computed for each station separately
\[
\hat{\sigma}^{stat} = \sqrt{\frac{\hat{\M\epsilon}^T\M P\hat{\M\epsilon}}{r}}.
\]
It is usually the last processing step, but can also be used at other points in the
processing in combination with suffix to write intermediate results.
Name
Type
Annotation
suffix
string
appended to every output file name (e.g. orbit.G01.suffix.dat)
WriteNormalEquations
Accumulates the normal equations matrix and writes it.
If remainingParameters
is set only the selected parameters are written to the normal equations
and all other parameters are eliminated beforehand (implicitly solved).
parameter order/selection of output normal equations
WriteResiduals
Writes the observation residuals for all
selectStations. For each station-satellite
pair a file is written. The file name is interpreted as a template with
the variables {station} and {satellite} being replaced by the station name.
Enable/disable parameter groups and constraint groups for subsequent steps,
e.g. processingStep:estimate or
processingStep:writeResults.
The name and nameConstraint of these groups
are defined in parametrizations.
Prior models or previously estimated parameters used as new apriori $\M x_0$ values are unaffected
and they are always reduced from the observations. This means all unselected parameters are kept fixed
to their last result.
Name
Type
Annotation
parametrization
choice
enable
sequence
name
string
wildcards: * and ?
disable
sequence
name
string
wildcards: * and ?
SelectSatellites
This step can be used to process only a subset of satellites in subsequent processing steps.
Perform these processing steps. This step can be used to structure complex processing flows.
The select.. processing steps
defined within a group only affect the steps within this group.
A list of satellite names must be provided via inputfileSatelliteList.
The other input files are read for each satellite, where the file name is interpreted as a template
with the variable {satellite} being replaced by the satellite name from list.
The inputfileSatelliteInfo contains information about laser retro-reflector,
optical reference point, retro-reflector orientation, range corrections and center of mass.
It can be created via PlatformCreate.
If inputfileAttitude is not provided an orbit reference frame
(along, cross, nearly nadir) is assumed.
Name
Type
Annotation
inputfileSatelliteList
filename
ascii file with satellite names, used to loop variable {satellite}
The apriori observation weighting is defined by the expression accuracy in $[m]$.
The following variables are defined for each observation from the
inputfileObservations: {residual}, {accuracy},
{redundancy}, {laserWavelength}, {azimut}, {elevation}.
Observations with non-positive accuracies are removed.
This can be used for a rough outlier removal by an expression such as
accuracy = if(abs(residual)>30, NAN, accuracy).
The effects of loading and tidal deformation on station positions can be corrected for
via loadingDisplacement and
tidalDisplacement, respectively.
Tidal deformations typically include:
Numbering order by order:
\[ c20, c30, c40, \ldots, c21, s21, c31, s31, \ldots, c22, s22 \]
OrderNonAlternating
Numbering order by order with cnm, snm non-alternating:
\[ c20, c30, c40, \ldots, c21, c31, c41, \ldots, s21, s31, s41, \]
File
Numbering as specified in the chosen file.
The inputfile is a matrix with the first column indicating cnm/snm with 0 or 1.
The second and third column specify degree and order.
Observation vector for GRACE like data (satellite-tracking and precise orbit data (POD))
of observation equations in a least squares adjustment.
The observations are reduced by the effect of inputfileAccelerometer
and forces (observed minus computed).
Name
Type
Annotation
sstRightSideType
sequence
inputfileSatelliteTracking
filename
ranging observations and corrections
inputfileOrbit1
filename
kinematic positions of satellite A as observations
inputfileOrbit2
filename
kinematic positions of satellite B as observations
inputfileAccelerometer1
filename
non-gravitational forces in satellite reference frame A
inputfileAccelerometer2
filename
non-gravitational forces in satellite reference frame B
================================================
FILE: docs/html/static/groops.css
================================================
/* -- general alignment ---------------------------------------------------- */
.center {
text-align: center;
}
.top-buffer {
margin-top: 1.0em;
}
.list-group {
list-style-position: inside;
}
/* -- icon styles ---------------------------------------------------- */
/* figures are centered as block elements */
img.figure {
display: block;
margin-left: auto;
margin-right: auto;
}
/* small inline icons scale with the surrounding text */
img.groopsIcon {
height: 1.2em;
vertical-align: middle;
}
img.logo {
height: 1.5em;
vertical-align: middle;
}
/* -- table styles ---------------------------------------------------- */
/* config rows show a kind-specific icon (set below) as left background image */
div.config {
background-size: 1.2em;
background-position: center left;
background-repeat: no-repeat;
padding-left: 1.5em;
}
div.optional {
background-image: url("element.svg");
}
div.mustset {
background-image: url("element-mustset.svg");
}
div.mustset-unbounded {
background-image: url("element-mustset-unbounded.svg");
}
div.optional-unbounded {
background-image: url("element-unbounded.svg");
}
/* tree connector line drawn behind nested config elements */
div.config-tree {
background-image: url("connector.svg");
background-repeat: no-repeat;
}
/* each nesting depth indents by 1.6em and shifts the connector image to match */
div.depth-0 {
background: none;
padding-left: 0.0em;
}
div.depth-1 {
padding-left: 1.6em;
background-position: center left 0.1em;
}
div.depth-2 {
padding-left: 3.2em;
background-position: center left 1.7em;
}
div.depth-3 {
padding-left: 4.8em;
background-position: center left 3.3em;
}
div.depth-4 {
padding-left: 6.4em;
background-position: center left 4.9em;
}
div.depth-5 {
padding-left: 8.0em;
background-position: center left 6.5em;
}
td {
padding: 3px !important;
}
/* -- inline icons ---------------------------------------------------- */
/* inline links to programs/classes/config elements/files carry a small icon */
.groops-program
{
background-image: url("program.svg");
background-repeat: no-repeat;
background-position: center left;
background-size: 1.0em;
padding-left: 1.2em;
font-weight: bold;
}
.groops-class
{
background-image: url("element.svg");
background-repeat: no-repeat;
background-position: center left;
background-size: 1.0em;
padding-left: 1.2em;
font-weight: bold;
}
.groops-config-element
{
background-image: url("element.svg");
background-repeat: no-repeat;
background-position: center left;
background-size: 1.0em;
padding-left: 1.2em;
font-weight: bold;
}
.groops-file
{
background-image: url("file.svg");
background-repeat: no-repeat;
background-position: center left;
background-size: 1.0em;
padding-left: 1.1em;
font-weight: bold;
}
/* intentionally empty: placeholder class for plain cross references */
.groops-ref
{
}
/* -- search box ---------------------------------------------------- */
/* collapsed search-result card: fixed height, overflow hidden */
div.faded {
height: 5em;
overflow-y: hidden;
}
/* white fade-out gradient overlaid at the bottom of a collapsed card */
div.faded:after {
content : "";
position : absolute;
z-index : 1;
bottom : 0;
left : 0;
pointer-events : none;
background-image : linear-gradient(to bottom,
rgba(255,255,255, 0),
rgba(255,255,255, 1) 90%);
width : 100%;
height : 3em;
}
div.search-result {
min-height: 5em;
}
/* -- end ------------------------------------------------------------- */
================================================
FILE: docs/html/static/searchtools.js
================================================
/** Global variables */
// Full-text search index built with lunr over the global `documents` array
// (presumably generated elsewhere into documents.js — TODO confirm).
// 'key' is the document identifier; the listed fields are searchable.
// metadataWhitelist keeps match positions so results can be highlighted
// (see highlightResult below).
var idx = lunr(function () {
this.ref('key')
this.field('name')
this.field('display_text')
this.field('description')
this.field('config_table')
this.metadataWhitelist = ['position']
for(var i in documents)
this.add(documents[i]);
})
var wrapThreshold = 6 /** number of search results per page */
var paginationCount = 5 /** number of (maximum) pagination links */
var activePageIndex = 0 /** index of currently viewed search result page */
var currentPaginationLinks; /** array of currently available page links in pagination list */
var resultRangePerPage; /** array containing the intervals of search results per page (ctd. blockIndex) */
/** Functions for search result presentation */
/**
* Show/hide search result boxes
*
* @param oldIndex page index of results to hide
* @param newIndex page index of results to show
*/
// NOTE(review): this function body appears mangled by text extraction — the
// for-loop condition at the line below is truncated (a "<" comparison and,
// apparently, the body of a separate page-change function have been merged
// into it). Verify against the original searchtools.js before editing.
function toggleSearchResults(oldIndex, newIndex) {
var resultList = document.getElementById("searchResults");
for(var k = resultRangePerPage[oldIndex]; k(resultRangePerPage.length-2) || (currentPaginationLinks[0]+shift)<0 ) // if the shift would put outside bounds, ignore
shift = 0
for(i in currentPaginationLinks) // update pagination link numbers
currentPaginationLinks[i] += shift
activePageIndex = newPageIndex // update HTML elements
updatePaginationLinks(currentPaginationLinks, activePageIndex)
}
/**
* Update HTML elements of pagination buttons
*
* @param paginationLinks array of page indices currently in pagination list
* @param newIndex index of page to be displayed
*/
// NOTE(review): the loop below is truncated by text extraction (the "<"
// condition and the HTML string assigned inside the loop were stripped).
// Restore from the original searchtools.js before modifying.
function updatePaginationLinks(paginationLinks, newIndex) {
var paginationSpan = document.getElementById("pagination-page-number")
paginationSpan.innerHTML = (newIndex+1)
for(var k = 0; k'+(paginationLinks[k]+1)+''
}
}
/**
* Emphasize text based on search result position
*
* @param position array of search result position in text
* @param text text to be changed
*/
// Wraps the matched substring (position[0] .. position[0]+position[1]) in a
// highlight marker and returns the recombined string.
// NOTE(review): the HTML tags in the string literal below (likely a <span>
// or <mark> wrapper) were stripped during text extraction — confirm against
// the original file.
function highlightResult(position, text) {
var highlight = '' + text.slice(position[0], position[0]+position[1]+1) + ''
return text.slice(0, position[0]) + highlight + text.slice(position[0]+position[1]+1, text.length)
}
/**
* Expand or collapse a search result card when its button is clicked.
* Toggling the "faded" class switches the card between the truncated
* (fixed-height, faded-out) and fully expanded presentation.
*
* @param hiddenId element id of the search result box to toggle
*/
function expandSearchBox(hiddenId)
{
document.getElementById(hiddenId).classList.toggle("faded")
}
/**
* Parse search terms for URL. This function generates the query which is passed to the search module
*
* @param rawInput URL POST token
*/
// NOTE(review): this function and the two Mustache template strings below
// are heavily garbled by text extraction — the for-loop condition was
// truncated and the HTML markup inside the template literals was stripped,
// merging the function with the templates. Do not edit without the original
// searchtools.js.
function parseSearchTerms(rawInput)
{
var tokens = rawInput.split('+')
var searchString = tokens[0]
for(var i = 1; i{{name}}' +
'+' +
'
{{{content}}}
'
/** HTML template for the pagination links */
// Mustache template rendering the «/» arrows and the {{#pages}} page links
// (HTML stripped by extraction — see note above).
var templatePagination = '
'+
'
«
'+
'{{#pages}}
{{.}}
{{/pages}}'+
'
»
'+
'
'
/** Search result generation */
/**
* This function should be called when the search page is loaded. It parses the POST data from the URL and queries the
* search index. The results are dynamically displayed as (potentially) hidden cards.
*/
// NOTE(review): the result-rendering loop below is mangled by text
// extraction (the while condition and the per-result `data` construction
// were merged into one line). Verify against the original file.
function startSearch() {
// the query arrives as "?q=term1+term2" in the URL
var rawSearchString = window.location.search.substr(1).split('=')[1]
var metaInfo = document.getElementById("search-meta-info")
var list = document.getElementById("searchResults")
// clear any previously rendered results
while(list.firstChild) {
list.removeChild(list.firstChild);
}
if(rawSearchString == '') // short circuit on empty search
{
metaInfo.innerHTML = 'Empty search.';
return;
}
var searchQuery = parseSearchTerms(rawSearchString) // create query from user input and search index
var results = idx.search(searchQuery);
if(results.length == 0) // short circuit on no results
{
metaInfo.innerHTML = 'Your search - "'+rawSearchString+'" - did not return any results.'
return;
}
// partition the results into pages of wrapThreshold entries; render each
// result card via the Mustache template (hidden beyond the first page)
resultRangePerPage = [0]
while(resultRangePerPage[resultRangePerPage.length-1]=wrapThreshold) ? 'display: none;' : ''}
var renderedSearchResult = Mustache.render(templateSearchResult, data)
list.insertAdjacentHTML('beforeend', renderedSearchResult)
}
// build the initial pagination link list [0, 1, ..., min(count, pages)-1]
currentPaginationLinks = Array.apply(null, {length: Math.min(paginationCount, resultRangePerPage.length-1)}).map(Number.call, Number)
var data = {'pages': currentPaginationLinks}
var renderedPagination = Mustache.render(templatePagination, data)
document.getElementById("content").insertAdjacentHTML('beforeend', renderedPagination)
activePageIndex = 0
updatePaginationLinks(currentPaginationLinks, activePageIndex)
}
================================================
FILE: docs/html/static/template.html
================================================
GROOPS - [[[title]]]
This class provides functions for calculating the density, temperature and velocity
in the thermosphere.
The wind is computed by HWM14 model if hwm14DataDirectory is provided.
A quiet thermosphere is assumed if inputfileMagnetic3hAp is not given.
JB2008
Thermosphere parameters from the JB2008 model:
Bowman, B. R., Tobiska, W. K., Marcos, F. A., Huang, C. Y., Lin, C. S., Burke, W. J. (2008).
A new empirical thermospheric density model JB2008 using new solar and geomagnetic indices.
In AIAA/AAS Astrodynamics Specialist Conference and Exhibit. https://doi.org/10.2514/6.2008-6438
Emmert J.D, D.P.Drob, J.M. Picone, et al. (2020), NRLMSIS 2.0: A whole-atmosphere empirical
model of temperature and neutral species densities. Earth and Space Science, Volume 8, 3
https://doi.org/10.1029/2020EA001321
This class computes functionals of the time depending tide potential,
e.g. potential, acceleration or gravity gradients.
If several instances of the class are given the results are summed up.
Before summation every single result is multiplied by a factor.
To get the difference between two ocean tide models you must choose one factor by 1
and the other by -1. To get the mean of two models just set each factor to 0.5.
AstronomicalTide
This class computes the tide generating potential (TGP) of sun, moon
and planets (Mercury, Venus, Mars, Jupiter, Saturn).
It takes into account the flattening of the Earth (At the moment only at the acceleration level).
The computed result is multiplied with factor.
Name
Type
Annotation
useMoon
boolean
TGP of moon
useSun
boolean
TGP of sun
usePlanets
boolean
TGP of planets
useEarth
boolean
TGP of Earth
c20Earth
double
J2 flattening of the Earth
factor
double
the result is multiplied by this factor, set -1 to subtract the field
EarthTide
This class computes the earth tide according to the IERS2003 conventions.
The values of solid Earth tide external potential Love numbers and
the frequency dependent corrections of these values are given in the file
inputfileEarthtide. The effect of the permanent tide is removed if
includePermanentTide is set to false.
The computed result is multiplied with factor.
Name
Type
Annotation
inputfileEarthtide
filename
includePermanentTide
boolean
results in FALSE: zero tide, TRUE: tide free gravity field
factor
double
the result is multiplied by this factor, set -1 to subtract the field
PoleTide
The potential coefficients of the solid Earth pole tide according to the
IERS2003 conventions are given by
\[
\begin{split}
\Delta c_{21} &= s\cdot(m_1 + o\cdot m_2), \\
\Delta s_{21} &= s\cdot(m_2 - o\cdot m_1),
\end{split}
\]with $s$ is the scale, $o$ is the outPhase and
$(m_1,m_2)$ are the wobble variables in seconds of arc.
They are related to the polar motion variables $(x_p,y_p)$ according to
\[
\begin{split}
m_1 &= (x_p - \bar{x}_p), \\
m_2 &= -(y_p - \bar{y}_p),
\end{split}
\]The mean pole $(\bar{x}_p, \bar{y}_p)$ is approximated by a polynomial
read from inputfileMeanPole.
The displacement is calculated with
\[
\begin{split}
S_r &= -v\sin2\vartheta(m_1\cos\lambda+m_2\sin\lambda),\\
S_\vartheta &= -h\cos2\vartheta(m_1\cos\lambda+m_2\sin\lambda),\\
S_\lambda &= h\cos\vartheta(m_1\sin\lambda-m_2\cos\lambda),
\end{split}
\]where $h$ is the horizontalDisplacement
and $v$ is the verticalDisplacement.
The computed result is multiplied with factor.
Name
Type
Annotation
scale
double
outPhase
double
inputfileMeanPole
filename
horizontalDisplacement
double
[m]
verticalDisplacement
double
[m]
factor
double
the result is multiplied by this factor, set -1 to subtract the field
OceanPoleTide
The ocean pole tide is generated by the centrifugal effect of polar motion on the oceans.
The potential coefficients of this effect according to the
IERS2003 conventions are given by
\[
\begin{Bmatrix}
\Delta c_{nm} \\
\Delta s_{nm}
\end{Bmatrix}=
\begin{Bmatrix}
c_{nm}^R \\
s_{nm}^R
\end{Bmatrix}
(m_1\gamma^R+m_2\gamma^I)+
\begin{Bmatrix}
c_{nm}^I \\
s_{nm}^I
\end{Bmatrix}
(m_2\gamma^R-m_1\gamma^I)
\]where the coefficients are read from file inputfileOceanPole,
$\gamma=\gamma^R+i\gamma^I$ is given by gammaReal and
gammaImaginary and $(m_1,m_2)$ are the wobble variables in radians.
They are related to the polar motion variables $(x_p,y_p)$ according to
\[
\begin{split}
m_1 &= (x_p - \bar{x}_p), \\
m_2 &= -(y_p - \bar{y}_p),
\end{split}
\]The mean pole $(\bar{x}_p, \bar{y}_p)$ is approximated by a polynomial
read from inputfileMeanPole.
The computed result is multiplied with factor.
Name
Type
Annotation
inputfileOceanPole
filename
minDegree
uint
maxDegree
uint
gammaReal
double
gammaImaginary
double
inputfileMeanPole
filename
factor
double
the result is multiplied by this factor, set -1 to subtract the field
DoodsonHarmonicTide
The time variable potential of ocean tides is given by a fourier expansion
\[
V(\M x,t) = \sum_{f} V_f^c(\M x)\cos(\Theta_f(t)) + V_f^s(\M x)\sin(\Theta_f(t)),
\]where $V_f^c(\M x)$ and $V_f^s(\M x)$ are spherical harmonics expansions and are
read from the file inputfileDoodsonHarmonic.
If set the expansion is limited in the range between minDegree
and maxDegree inclusively.
$\Theta_f(t)$ are the arguments of the tide constituents $f$:
\[
\Theta_f(t) = \sum_{i=1}^6 n_f^i\beta_i(t),
\]where $\beta_i(t)$ are the Doodson's fundamental arguments ($\tau,s,h,p,N',p_s$)
and $n_f^i$ are the Doodson multipliers for the term at frequency $f$.
After the interpolation step a selection of the computed constituents can be
chosen by selectDoodson. Only these constituents are considered for the results.
If no selectDoodson is set all constituents will be used. The constituents can
be coded as Doodson number (e.g. 255.555) or as names intoduced by Darwin (e.g. M2).
consider only these constituents, code number (e.g. 255.555) or darwin name (e.g. M2)
minDegree
uint
maxDegree
uint
nodeCorr
uint
nodal corrections: 0-no corr, 1-IHO, 2-Schureman
factor
double
the result is multiplied by this factor, set -1 to subtract the field
Centrifugal
Computes the centrifugal potential in a rotating system
\[
V(\M r, t) = \frac{1}{2} (\M\omega(t)\times\M r)^2.
\]The current rotation vector $\M\omega(t)$ is computed from the
earthRotation
provided by the calling program.
The computed result is multiplied with factor.
Be careful, the centrifugal potential is not harmonic.
Convolution with a harmonic kernel (e.g. to compute gravity
anomalies) is not meaningful.
Name
Type
Annotation
factor
double
the result is multiplied by this factor, set -1 to subtract the field
SolidMoonTide
This class computes the solid moon tide according to the IERS2010 conventions.
The values of the solid Moon tide external potential Love numbers are given and
there are no frequency-dependent corrections of these values.
The computed result is multiplied with factor.
Name
Type
Annotation
k20
double
k30
double
factor
double
the result is multiplied by this factor, set -1 to subtract the field
Group
Groups a set of tides and has no further effect itself.
This class generates a series of points in time. The series is always sorted in ascending order.
Depending on the application the series is interpreted as a list of points or as intervals between the points.
Figure: List of points $t_i$ vs. intervals $T_i$.
UniformSampling
Generates a time series with uniform sampling. The first point in time will be timeStart.
The last generated point in time will be less or equal timeEnd.
The time step between generated points in time is given by sampling.
Name
Type
Annotation
timeStart
time
first point in time
timeEnd
time
last point in time will be less or equal timeEnd
sampling
time
time step between points in time
UniformInterval
Generates a time series with uniform sampling between timeStart and timeEnd.
intervalCount gives the count of intervals. This class generates count+1 points in time
inclusive timeStart and timeEnd.
Name
Type
Annotation
timeStart
time
1st point of the time series
timeEnd
time
last point of the time series
intervalCount
uint
count of intervals, count+1 points in time will be generated
Irregular
The points of the time series are given explicitly with time.
Name
Type
Annotation
time
time
explicit list of points in time
Monthly
If useMonthMiddle is set, time points are generated at mid of each month inclusively
the monthStart in yearStart and monthEnd in yearEnd.
Otherwise times are given at the first of each month and a time point after the last month.
Name
Type
Annotation
monthStart
uint
yearStart
uint
monthEnd
uint
yearEnd
uint
useMonthMiddle
boolean
time points are mid of months, otherwise the 1st of each month + a time point behind the last month
Yearly
If useYearMiddle is set, time points are generated at mid of each year inclusively yearStart
and yearEnd. Otherwise times are given at the first of each year and a time point after the last year.
Name
Type
Annotation
yearStart
uint
yearEnd
uint
useYearMiddle
boolean
time points are mid of years, otherwise the 1st of each year + a time point behind the last year
EveryMonth
Generates a time series with monthly sampling. The first point in time will be timeStart and the following
points are generated for each month at the same day and time in month.
The last generated point in time will be less or equal timeEnd.
Name
Type
Annotation
timeStart
time
first point in time
timeEnd
time
last point in time will be less or equal timeEnd
EveryYear
Generates a time series with yearly sampling. The first point in time will be timeStart and the following
points are generated for each year at the same day and time in year.
The last generated point in time will be less or equal timeEnd.
Name
Type
Annotation
timeStart
time
first point in time
timeEnd
time
last point in time will be less or equal timeEnd
Instrument
Read a time series (epochs) from an instrument file.
The time series can be restricted to the interval
starting from timeStart and before timeEnd.
Name
Type
Annotation
inputfileInstrument
filename
timeStart
time
inclusive, i.e. exclude epochs before this epoch
timeEnd
time
exclusive, i.e. only epochs before this time are used
InstrumentArcIntervals
Reconstruct a time series from an instrument file.
The time series is the first epoch of each arc plus one time step beyond the last
epoch of the last arc (using median sampling).
Name
Type
Annotation
inputfileInstrument
filename
Must be regular. Time series is first epoch of each arc plus one time step extrapolated from last epoch of last arc.
Revolution
Reads an orbit file and creates a time stamp for each ascending equator crossing.
The time series can be restricted to the interval
starting from timeStart and before timeEnd.
Name
Type
Annotation
inputfileOrbit
filename
timeStart
time
exclude epochs before this epoch
timeEnd
time
only epochs before this time are used
Exclude
In a first step a timeSeries is generated.
In a second step all times are removed which are in range before or after excludeMargin seconds
of the times given by timeSeriesExclude.
This class provides functions for calculating and estimating
the signal delay in the dry and wet atmosphere.
ViennaMapping
Tropospheric delays based on the Vienna Mapping Functions 3 (VMF3) model
(Landskron and Boehm 2017, DOI: 10.1007/s00190-017-1066-2).
Hydrostatic and wet mapping function coefficients ($a_h$, $a_w$) and zenith delays (ZHD, ZWD) have to be provided
via inputfileVmfCoefficients. This file can contain either station-specific data
(see ViennaMappingFunctionStation2File) or data on a regular global grid
(see ViennaMappingFunctionGrid2File). In the second case mapping coefficients and zenith delays are
interpolated to the requested coordinates. This includes a height correction that requires approximate meteorological
data provided via inputfileGpt.
Name
Type
Annotation
inputfileVmfCoefficients
filename
ah, aw, zhd, zwd coefficients
inputfileGpt
filename
gridded GPT data
aHeight
double
parameter a (height correction)
bHeight
double
parameter b (height correction)
cHeight
double
parameter c (height correction)
GPT
Tropospheric delays based on the Global Pressure and Temperature 3 (GPT3) model
(Landskron and Boehm 2017, DOI: 10.1007/s00190-017-1066-2).
It is an empirical model derived from the Vienna Mapping Functions 3
(VMF3, see viennaMapping) and thus does not require
additional mapping coefficients and zenith delay values.
Name
Type
Annotation
inputfileGpt
filename
gridded GPT data
aHeight
double
parameter a (height correction)
bHeight
double
parameter b (height correction)
cHeight
double
parameter c (height correction)
MendesAndPavlis
Tropospheric delays based on the Mendes-Pavlis model that employs meteorological data.
(Mendes et al. (2002), 10.1029/2001GL014394 and
Mendes and Pavlis (2004), 10.1029/2004GL020308)
================================================
FILE: docs/index.html
================================================
link to documentation
================================================
FILE: docs/latex/.gitignore
================================================
# dynamic content
# ---------------
documentation.*
!documentation.tex
!documentation.pdf
*.log
================================================
FILE: docs/latex/classes.auto.tex
================================================
% auto generated by GROOPS
\section{AutoregressiveModelSequence}\label{autoregressiveModelSequenceType}
Represents a sequence of multivariate autoregressive (AR) models with increasing order $p$.
The AR models should be stored as \file{matrix file}{matrix} in the \reference{GROOPS definition of
AR models}{fundamentals.autoregressiveModel}.
The required AR models can be computed with \program{CovarianceMatrix2AutoregressiveModel},
and passed to this class through
\config{inputfileAutoregressiveModel} in increasing order.
The main purpose of AutoregressiveModelSequence is to use AR models of the form
\begin{equation}
\label{eq:ar-model}
\mathbf{y}_e(t_i) = \sum_{k=1}^p \mathbf{\Phi}^{(p)}_k\mathbf{y}_e(t_{i-k}) + \mathbf{w}(t_i),
\hspace{5pt} \mathbf{w}(t_i) \sim \mathcal{N}(0, \mathbf{\Sigma}^{(p)}_\mathbf{w}),
\end{equation}
to create pseudo-observation equations
\begin{equation}
\label{eq:pseudo-observations-transformed}
0 = \bar{\mathbf{\Phi}} \Delta\mathbf{y} + \bar{\mathbf{w}}, \hspace{5pt} \bar{\mathbf{w}} \sim
\mathcal{N}(0, \bar{\mathbf{\Sigma}}_{\bar{\mathbf{w}}}),
\end{equation}
with
\begin{equation}
\label{eq:pseudo-observations-ar}
\bar{\mathbf{\Phi}} =
\begin{bmatrix}
\mathbf{I} & & & & & \\
-\mathbf{\Phi}^{(1)}_1 & \mathbf{I} & & & & \\
-\mathbf{\Phi}^{(2)}_2 & -\mathbf{\Phi}^{(2)}_1 & \mathbf{I} & & & \\
-\mathbf{\Phi}^{(3)}_3 & -\mathbf{\Phi}^ {(3)}_2 & -\mathbf{\Phi}^ {(3)}_1 & \mathbf{I} & & \\
& -\mathbf{\Phi}^{(3)}_3 & -\mathbf{\Phi}^ {(3)}_2 & -\mathbf{\Phi}^ {(3)}_1 & \mathbf{I} & \\
& & \ddots & \ddots & \ddots & \ddots \\
\end{bmatrix},
\hspace{15pt}
\bar{\mathbf{\Sigma}}_{\bar{\mathbf{w}}} =
\begin{bmatrix}
\mathbf{\Sigma}^{(0)}_{\mathbf{w}} & & & & & \\
& \mathbf{\Sigma}^{(1)}_{\mathbf{w}} & & & & \\
& & \mathbf{\Sigma}^{(2)}_{\mathbf{w}} & & & \\
& & & \mathbf{\Sigma}^{(3)}_{\mathbf{w}} & & \\
& & & & \mathbf{\Sigma}^{(3)}_{\mathbf{w}} & \\
& & & & & \ddots \\
\end{bmatrix}.
\end{equation}
These pseudo-observation equations are used to constrain high-frequency temporal gravity field variations (see
\program{KalmanSmootherLeastSquares}, \program{NormalsBuildShortTimeStaticLongTime},
\program{PreprocessingSst}).
The corresponding normal equation coefficient matrix is given by
\begin{equation}
\label{eq:ar-normals}
\bar{\mathbf{\Phi}}^T\bar{\mathbf{\Sigma}}^{-1}_{\bar{\mathbf{w}}}\bar{\mathbf{\Phi}}
\end{equation}
and if all AR models are estimated from the same sample its inverse is a block-Toeplitz covariance matrix
\begin{equation}
(\mathbf{\Sigma}_{\mathbf{y}_m})_{ij} =
\begin{cases}
\mathbf{\Sigma}(|j-i|) & \text{for } i \leq j \\
(\mathbf{\Sigma}(|j-i|))^T & \text{otherwise}
\end{cases},
\end{equation}
which can be computed using \program{AutoregressiveModel2CovarianceMatrix}.
A detailed description with applications can be found in:
Kvas, A., Mayer-Gürr, T. GRACE gravity field recovery with background model uncertainties.
J Geod 93, 2543–2552 (2019). \url{https://doi.org/10.1007/s00190-019-01314-1}
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~autoregressiveModelSequenceType & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset-unbounded.pdf}~inputfileAutoregressiveModel & \hfuzz=500pt filename & \hfuzz=500pt matrix file containing an AR model\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~sigma0 & \hfuzz=500pt double & \hfuzz=500pt a-priori sigma for white noise covariance\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{Border}\label{borderType}
With this class you can select one or more regions on the surface of the Earth.
In every instance of Border you can choose whether the specific region is excluded
from the overall result with the switch \config{exclude}.
To determine whether a specific point will be used, the following algorithm is applied:
In a first step, all points are selected if the first border excludes points; otherwise all points are excluded.
Then every point is tested against each instance of border from top to bottom.
If the point is not within the region of a border, nothing happens.
Otherwise it will be included or excluded depending on the switch \config{exclude}.
First Example: The border excludes all continental areas.
The result are points on the oceans only.
Second Example: The first border describes the continent of North America. The next borders
exclude the Great Lakes and the last border describes Washington Island.
In this configuration points are selected if they are inside North America
but not in the area of the Great Lakes. But if the point is on Washington Island
it will be included again.
\subsection{Rectangle}
The region is restricted along lines of geographical coordinates.
\config{minPhi} and \config{maxPhi} describe the lower and the upper bound of the region.
\config{minLambda} and \config{maxLambda} define the left and right bound.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~minLambda & \hfuzz=500pt angle & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~maxLambda & \hfuzz=500pt angle & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~minPhi & \hfuzz=500pt angle & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~maxPhi & \hfuzz=500pt angle & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~exclude & \hfuzz=500pt boolean & \hfuzz=500pt dismiss points inside\\
\hline
\end{tabularx}
\subsection{Cap}
The region is defined by a spherical cap with the center given in geographical coordinates
longitude (\config{lambdaCenter}) and latitude (\config{phiCenter}).
The radius of the cap is given as aperture angle \config{psi}.
\fig{!hb}{0.4}{borderCap}{fig:borderCap}{spherical cap}
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~lambdaCenter & \hfuzz=500pt angle & \hfuzz=500pt longitude of the center of the cap\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~phiCenter & \hfuzz=500pt angle & \hfuzz=500pt latitude of the center of the cap\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~psi & \hfuzz=500pt angle & \hfuzz=500pt aperture angle (radius)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~exclude & \hfuzz=500pt boolean & \hfuzz=500pt dismiss points inside\\
\hline
\end{tabularx}
\subsection{Polygon}\label{borderType:polygon}
The region is defined by \configFile{inputfilePolygon}{polygon}
containing one or more polygons given in longitude and latitude.
An additional \config{buffer} around the polygon can be defined.
Use a negative value to shrink the polygon area.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfilePolygon & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~buffer & \hfuzz=500pt double & \hfuzz=500pt buffer around polygon [km], \$<\$0: inside\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~exclude & \hfuzz=500pt boolean & \hfuzz=500pt dismiss points inside\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{Condition}\label{conditionType}
Test for conditions. See \reference{Loop and conditions}{general.loopsAndConditions} for usage.
\subsection{FileExist}
Check for a file or directory existing.
Supports wildcards * for any number of characters and ? for exactly one character.
Files smaller than \config{minimumSize} are treated as non-existent.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~file & \hfuzz=500pt filename & \hfuzz=500pt supports wildcards: * and ?\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~minimumSize & \hfuzz=500pt uint & \hfuzz=500pt minimum file size in byte.\\
\hline
\end{tabularx}
\subsection{Command}
Execute command and check success.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~command & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~silently & \hfuzz=500pt boolean & \hfuzz=500pt without showing the output.\\
\hline
\end{tabularx}
\subsection{Expression}\label{conditionType:expression}
Evaluate expression.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~expression & \hfuzz=500pt expression & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{Matrix}
Evaluate elements of a \configClass{matrix}{matrixGeneratorType} based on an expression.
If \config{all}=\verb|yes|, all elements of the matrix must evaluate to true
for the condition to be fulfilled, otherwise any element evaluating to true is sufficient.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~matrix & \hfuzz=500pt \hyperref[matrixGeneratorType]{matrixGenerator} & \hfuzz=500pt expression is evaluated for each element of resulting matrix\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~expression & \hfuzz=500pt expression & \hfuzz=500pt (variable: data) evaluated for each element\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~all & \hfuzz=500pt boolean & \hfuzz=500pt all (=yes)/any (=no) elements must evaluate to true\\
\hline
\end{tabularx}
\subsection{MatrixEmpty}
Evaluate if \file{matrix}{matrix} (or \file{instrument}{instrument}) file is empty/has zero size.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileMatrix & \hfuzz=500pt filename & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{StringContainsPattern}
Determines if there is a match between a \config{pattern} and some subsequence in a \config{string}.
Supports wildcards * for any number of characters and ? for exactly one character.
If \config{isRegularExpression} is set, \config{pattern} is interpreted as a
regular expression instead. In any case, the \reference{text parser}{general.parser:text}
is applied beforehand.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~string & \hfuzz=500pt filename & \hfuzz=500pt should contain a \{variable\}\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~pattern & \hfuzz=500pt filename & \hfuzz=500pt supports wildcards: * and ?\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~isRegularExpression & \hfuzz=500pt boolean & \hfuzz=500pt pattern is a regular expression\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~caseSensitive & \hfuzz=500pt boolean & \hfuzz=500pt treat lower and upper case as distinct\\
\hline
\end{tabularx}
\subsection{StringMatchPattern}
Determines if a \config{pattern} matches the entire \config{string}.
Supports wildcards * for any number of characters and ? for exactly one character.
If \config{isRegularExpression} is set, \config{pattern} is interpreted as a
regular expression instead. In any case, the \reference{text parser}{general.parser:text}
is applied beforehand.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~string & \hfuzz=500pt filename & \hfuzz=500pt should contain a \{variable\}\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~pattern & \hfuzz=500pt filename & \hfuzz=500pt supports wildcards: * and ?\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~isRegularExpression & \hfuzz=500pt boolean & \hfuzz=500pt pattern is a regular expression\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~caseSensitive & \hfuzz=500pt boolean & \hfuzz=500pt treat lower and upper case as distinct\\
\hline
\end{tabularx}
\subsection{And}
All conditions must be met (with short-circuit evaluation).
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~condition & \hfuzz=500pt \hyperref[conditionType]{condition} & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{Or}
One of the conditions must be met (with short-circuit evaluation).
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~condition & \hfuzz=500pt \hyperref[conditionType]{condition} & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{Not}
The result of the condition is inverted.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~condition & \hfuzz=500pt \hyperref[conditionType]{condition} & \hfuzz=500pt \\
\hline
\end{tabularx}
\clearpage
%==================================
\section{CovariancePod}\label{covariancePodType}
Provides arc-wise covariance matrices for precise orbit data.
Temporal correlations are modeled in the orbit system (along, cross, radial).
The \configFile{inputfileCovarianceFunction}{matrix} provides temporal covariance functions for each axis.
From the diagonal matrix for each time step
\begin{equation}
Cov_{3\times3}(t) = \text{diag}(cov_x(t), cov_y(t), cov_z(t))
\end{equation}
the Toeplitz covariance matrix for an arc is constructed
\begin{equation}
\M C = \begin{pmatrix}
Cov(t_0) & Cov(t_1) & \cdots & & & \\
Cov(t_1) & Cov(t_0) & Cov(t_1) & \cdots & & \\
\cdots & Cov(t_1) & Cov(t_0) & Cov(t_1) & \cdots & \\
& \cdots & \ddots & \ddots & \ddots & \cdots \\
\end{pmatrix}
\end{equation}
The epoch-wise $3\times3$ covariance matrices given by \configFile{inputfileCovariancePodEpoch}{instrument}
are eigenvalue-decomposed
\begin{equation}
\M C_{3\times3}(t_i) = \M Q \M\Lambda \M Q^T,
\end{equation}
where $\M Q$ is an orthogonal matrix and $\M\Lambda$ diagonal.
This is used to split the covariances matrices
\begin{equation}
\M C_{3\times3}(t_i) = \M D(t_i) \M D(t_i)^T = (\M Q \M\Lambda^{1/2} \M Q^T)(\M Q \M\Lambda^{1/2} \M Q^T)^T,
\end{equation}
and to compose a block diagonal matrix for an arc
\begin{equation}
\M D = \text{diag}(\M D(t_1), \M D(t_2), \ldots, \M D(t_n)).
\end{equation}
The complete covariance matrix of an arc is given by
\begin{equation}
\M C_{arc} = \sigma_0^2 \sigma_{arc}^2 \M D \M C \M D^T +
\text{diag}(\sigma_1^2\M I_{3\times3}, \sigma_2^2\M I_{3\times3}, \ldots, \sigma_n^2\M I_{3\times3})
\end{equation}
where \config{sigma}~$\sigma_0$ is an overall factor
and the arc specific factors $\sigma_{arc}$ can be provided with \configFile{inputfileSigmasPerArc}{matrix}.
The last matrix can be used to downweight outliers in single epochs and will be added if
\configFile{inputfileSigmasPerEpoch}{instrument} is provided.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~covariancePodType & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~sigma & \hfuzz=500pt double & \hfuzz=500pt general variance factor\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileSigmasPerArc & \hfuzz=500pt filename & \hfuzz=500pt different accuracies for each arc (multiplied with sigma)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileSigmasPerEpoch & \hfuzz=500pt filename & \hfuzz=500pt different accuracies for each epoch (added)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileCovarianceFunction & \hfuzz=500pt filename & \hfuzz=500pt covariances in time for along, cross, and radial direction\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileCovariancePodEpoch & \hfuzz=500pt filename & \hfuzz=500pt 3x3 epoch-wise covariances\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{CovarianceSst}\label{covarianceSstType}
Provides arc-wise covariance matrices for satellite-to-satellite observations (SST).
The \configFile{inputfileCovarianceFunction}{matrix} provides a temporal covariance function.
From it the Toeplitz covariance matrix is constructed
\begin{equation}
\M C = \begin{pmatrix}
cov(t_0) & cov(t_1) & \cdots & & & \\
cov(t_1) & cov(t_0) & cov(t_1) & \cdots & & \\
\cdots & cov(t_1) & cov(t_0) & cov(t_1) & \cdots & \\
& \cdots & \ddots & \ddots & \ddots & \cdots \\
\end{pmatrix}
\end{equation}
The complete covariance matrix of an arc is given by
\begin{equation}
\M C_{arc} = \sigma_0^2 \sigma_{arc}^2 \M C + \sigma_{S,arc}^2 \M S_{arc}+ \text{diag}(\sigma_1^2, \sigma_2^2, \ldots, \sigma_n^2)
\end{equation}
where \config{sigma}~$\sigma_0$ is an overall factor and the arc specific factors $\sigma_{arc}$
can be provided with \configFile{inputfileSigmasPerArc}{matrix}.
The second term describes general covariance matrices for each arc
\configFile{inputfileCovarianceMatrixArc}{matrix} together with the factors $\sigma_{S,arc}$ from \config{sigmasCovarianceMatrixArc}.
The last matrix can be used to downweight outliers in single epochs and will be added if
\configFile{inputfileSigmasPerEpoch}{instrument} is provided.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~covarianceSstType & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~sigma & \hfuzz=500pt double & \hfuzz=500pt general variance factor\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileSigmasPerArc & \hfuzz=500pt filename & \hfuzz=500pt different accuracies for each arc (multiplied with sigma)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileSigmasPerEpoch & \hfuzz=500pt filename & \hfuzz=500pt different accuracies for each epoch (added)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~inputfileCovarianceFunction & \hfuzz=500pt filename & \hfuzz=500pt covariance function in time\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-unbounded.pdf}~inputfileCovarianceMatrixArc & \hfuzz=500pt filename & \hfuzz=500pt one matrix file per arc. Use \{arcNo\} as template\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~sigmasCovarianceMatrixArc & \hfuzz=500pt filename & \hfuzz=500pt vector with one sigma for each covarianceMatrixArc\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{DigitalFilter}\label{digitalFilterType}
Digital filter implementation for the filtering of equally spaced time series. This class implements the filter equations as
\begin{equation}\label{digitalFilterType:arma}
\sum_{l=0}^Q a_l y_{n-l} = \sum_{k=-p_0}^{P-p_0-1} b_k x_{n-k}, \hspace{25pt} a_0 = 1,
\end{equation}
where $Q$ is the autoregressive (AR) order and $P$ is the moving average (MA) order. Note that the MA part can also be non-causal.
The characteristics of a filter cascade can be computed by the programs \program{DigitalFilter2FrequencyResponse} and \program{DigitalFilter2ImpulseResponse}.
To apply a filter cascade to a time series (or an instrument file ) use \program{InstrumentFilter}.
Each filter can be applied in forward and backward direction by setting \config{backwardDirection}.
If the same filter is applied in both directions, the combined filter has zero phase and the squared magnitude response.
Setting \config{inFrequencyDomain} to true applies the transfer function of the filter to the DFT of the input and synthesizes the result, i.e.:
\begin{equation}
y_n = \mathcal{F}^{-1}\{H\cdot\mathcal{F}\{x_n\}\}.
\end{equation}
This is equivalent to setting \config{padType} to \config{periodic}.
To reduce warmup effects, the input time series can be padded by choosing a \config{padType}:
\begin{itemize}
\item \config{none}: no padding is applied
\item \config{zero}: zeros are appended at the beginning and end of the input time series
\item \config{constant}: the beginning of the input time series is padded with the first value, the end is padded with the last value
\item \config{periodic}: periodic continuation of the input time series (i.e., the beginning is padded with the last epochs and the end is padded with the first epochs)
\item \config{symmetric}: beginning and end are reflected around the first and last epoch respectively
\end{itemize}
\subsection{MovingAverage}
Moving average (boxcar) filter. For odd lengths, this filter is symmetric and has therefore no phase shift. For even lengths, a phase shift of half a cycle is introduced.
\[
y_n = \sum_{k=-\lfloor\frac{P}{2}\rfloor}^{\lfloor\frac{P}{2}\rfloor} \frac{1}{P}x_{n-k}
\]
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~length & \hfuzz=500pt uint & \hfuzz=500pt number of epochs in averaging operator\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Median}
Moving median filter of length $n$. The filter output at epoch $k$ is the median of the epochs from $k-n/2$ to $k+n/2$.
The filter length $n$ should be odd to avoid a phase shift.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~length & \hfuzz=500pt uint & \hfuzz=500pt length of the moving window [epochs]\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Derivative}
Symmetric MA filter for numerical differentiation using polynomial approximation. The input time series is approximated by a moving polynomial of degree \config{polynomialDegree}, by solving
\begin{equation}
\begin{bmatrix} x(t_k+\tau_0) \\ \vdots \\ x(t_k+\tau_M) \end{bmatrix}
=
\begin{bmatrix}
1 & \tau_0 & \tau_0^2 & \cdots & \tau_0^M \\
\vdots & \vdots & \vdots & & \vdots \\
1 & \tau_M & \tau_M^2 & \cdots & \tau_M^M \\
\end{bmatrix}%^{-1}
\begin{bmatrix}
a_0 \\ \vdots \\ a_M
\end{bmatrix}
\qquad\text{with}\quad
\tau_j = (j-M/2)\cdot \Delta t,
\end{equation}
for each time step $t_k$ ($\Delta t$ is the \config{sampling} of the time series).
The filter coefficients for the $k$-th derivative are obtained by scaling the appropriate row of the inverse coefficient matrix $\mathbf{W}$:
\begin{equation}
\mathbf{b} = k! \, \mathbf{w}_{k+1,:}.
\end{equation}
The \config{polynomialDegree} should be even to avoid introducing a phase shift.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~polynomialDegree & \hfuzz=500pt uint & \hfuzz=500pt degree of approximation polynomial\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~derivative & \hfuzz=500pt uint & \hfuzz=500pt take kth derivative\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~sampling & \hfuzz=500pt double & \hfuzz=500pt assumed time step between points\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Integral}
Numerical integration using polynomial approximation.
The input time series is approximated by a moving polynomial of degree \config{polynomialDegree}
by solving
\begin{equation}
\begin{bmatrix} x(t_k+\tau_0) \\ \vdots \\ x(t_k+\tau_M) \end{bmatrix}
=
\begin{bmatrix}
1 & \tau_0 & \tau_0^2 & \cdots & \tau_0^M \\
\vdots & \vdots & \vdots & & \vdots \\
1 & \tau_M & \tau_M^2 & \cdots & \tau_M^M \\
\end{bmatrix}%^{-1}
\begin{bmatrix}
a_0 \\ \vdots \\ a_M
\end{bmatrix}
\qquad\text{with}\quad
\tau_j = (j-M/2)\cdot \Delta t,
\end{equation}
for each time step $t_k$ ($\Delta t$ is the \config{sampling} of the time series).
The numerical integral for each time step $t_k$ is approximated by the center interval of the estimated polynomial.
\fig{!hb}{0.7}{DigitalFilter_integral}{fig:DigitalFilterIntegral}{Numerical integration by polynomial approximation.}
\config{polynomialDegree} should be even to avoid a phase shift.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~polynomialDegree & \hfuzz=500pt uint & \hfuzz=500pt degree of approximation polynomial\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~sampling & \hfuzz=500pt double & \hfuzz=500pt assumed time step between points\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Correlation}
Correlation ($\rho$) of \config{correlation} is introduced into the time series:
\begin{equation}
y_n = \rho\cdot y_{n-1} + \sqrt{1-\rho^2}x_n.
\end{equation}
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~correlation & \hfuzz=500pt double & \hfuzz=500pt correlation\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~backwardDirection & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in backward direction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{GraceLowpass}
Low pass and differentiation filter as used for GRACE KBR and ACC data in the Level1A processing.
\fig{!hb}{0.8}{DigitalFilter_graceLowpass}{fig:DigitalFilterGraceLowpass}{Amplitude response of the low pass filter used in the L1A processing.}
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~rawDataRate & \hfuzz=500pt double & \hfuzz=500pt sampling frequency in Hz (fs).\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~convolutionNumber & \hfuzz=500pt uint & \hfuzz=500pt number of self convolutions of the filter kernel\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~fitInterval & \hfuzz=500pt double & \hfuzz=500pt length of the filter kernel [seconds]\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~lowPassBandwith & \hfuzz=500pt double & \hfuzz=500pt target low pass bandwidth\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~normFrequency & \hfuzz=500pt double & \hfuzz=500pt norm filter at this frequency [Hz] (default: GRACE dominant (J2) signal frequency)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~reduceQuadraticFit & \hfuzz=500pt boolean & \hfuzz=500pt remove-\$>\$filter-\$>\$restore quadratic fit\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~derivative & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~derivative1st & \hfuzz=500pt & \hfuzz=500pt range rate\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~derivative2nd & \hfuzz=500pt & \hfuzz=500pt range acceleration\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Butterworth}
Digital implementation of the Butterworth filter. The design of the filter is done by modifying the analog (continuous time) transfer function, which is
then transformed into the digital domain by using the bilinear transform. The filter coefficients are then determined by a least squares adjustment in time domain.
The \config{filterType} can be \config{lowpass}, \config{highpass}, where one cutoff frequency has to be specified, and \config{bandpass} and \config{bandstop} where two cutoff frequencies have to be specified.
Cutoff frequencies must be given as normalized frequency $w_n = f/f_{\text{nyq}}$. For example, a cutoff frequency of 30~mHz for a time series sampled every 5~seconds gives a normalized frequency of $0.03/0.1 = 0.3$.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~order & \hfuzz=500pt uint & \hfuzz=500pt filter order\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~type & \hfuzz=500pt choice & \hfuzz=500pt filter type\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~lowpass & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\quad\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~Wn & \hfuzz=500pt double & \hfuzz=500pt normalized cutoff frequency (f\_c / f\_nyq)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~highpass & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\quad\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~Wn & \hfuzz=500pt double & \hfuzz=500pt normalized cutoff frequency (f\_c / f\_nyq)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~bandpass & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\quad\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~Wn1 & \hfuzz=500pt double & \hfuzz=500pt lower normalized cutoff frequency (f\_c / f\_nyq)\\
\hfuzz=500pt\quad\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~Wn2 & \hfuzz=500pt double & \hfuzz=500pt upper normalized cutoff frequency (f\_c / f\_nyq)\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~bandstop & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\quad\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~Wn1 & \hfuzz=500pt double & \hfuzz=500pt lower normalized cutoff frequency (f\_c / f\_nyq)\\
\hfuzz=500pt\quad\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~Wn2 & \hfuzz=500pt double & \hfuzz=500pt upper normalized cutoff frequency (f\_c / f\_nyq)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~backwardDirection & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in backward direction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{File}
Read filter coefficients of \eqref{digitalFilterType:arma} from a coefficient file.
One column might define the index $n$
of the coefficients $a_n$ and $b_n$ in the other columns.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileMatrix & \hfuzz=500pt filename & \hfuzz=500pt matrix with filter coefficients\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~index & \hfuzz=500pt expression & \hfuzz=500pt index of coefficients (input columns are named data0, data1, ...)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~bn & \hfuzz=500pt expression & \hfuzz=500pt MA coefficients (moving average) (input columns are named data0, data1, ...)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~an & \hfuzz=500pt expression & \hfuzz=500pt AR coefficients (autoregressive) (input columns are named data0, data1, ...)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~backwardDirection & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in backward direction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Wavelet}
Filter representation of a wavelet.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileWavelet & \hfuzz=500pt filename & \hfuzz=500pt wavelet coefficients\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~type & \hfuzz=500pt choice & \hfuzz=500pt filter type\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~lowpass & \hfuzz=500pt & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~highpass & \hfuzz=500pt & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~level & \hfuzz=500pt uint & \hfuzz=500pt compute filter for specific decomposition level\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~backwardDirection & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in backward direction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Notch}
Implemented after Christian Siemes' dissertation, page 106.
\fig{!hb}{0.6}{DigitalFilter_notch}{fig:DigitalFilterNotch}{Amplitude response of a notch filter of order three with default settings.}
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~notchFrequency & \hfuzz=500pt double & \hfuzz=500pt normalized notch frequency w\_n = (f\_n/f\_nyq)\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~bandWidth & \hfuzz=500pt double & \hfuzz=500pt bandwidth at -3db. Quality factor of filter Q = w\_n/bw\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~backwardDirection & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in backward direction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{Decorrelation}
Moving average decorrelation filter based on eigendecomposition of a Toeplitz covariance matrix.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileCovarianceFunction & \hfuzz=500pt filename & \hfuzz=500pt covariance function of time series\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{TimeLag}
Lag operator in digital filter representation.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~lag & \hfuzz=500pt int & \hfuzz=500pt lag epochs: 1 (lag); -1 (lead)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inFrequencyDomain & \hfuzz=500pt boolean & \hfuzz=500pt apply filter in frequency domain\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~padType & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~none & \hfuzz=500pt & \hfuzz=500pt no padding is applied\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~zero & \hfuzz=500pt & \hfuzz=500pt zero padding\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~constant & \hfuzz=500pt & \hfuzz=500pt pad using first and last value\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~periodic & \hfuzz=500pt & \hfuzz=500pt periodic continuation of matrix\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~symmetric & \hfuzz=500pt & \hfuzz=500pt symmetric continuation around the matrix edges\\
\hline
\end{tabularx}
\subsection{ReduceFilterOutput}
Removes the filtered signal from the input, i.e. the input is passed
through a \configClass{digitalFilter}{digitalFilterType} with a frequency response of $1-H(f)$.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~filter & \hfuzz=500pt \hyperref[digitalFilterType]{digitalFilter} & \hfuzz=500pt remove filter output from input signal\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{Doodson}\label{doodson}
This is a string which describes a tidal frequency either coded as Doodson number
or using Darwin's name, e.g. \verb|255.555| or \verb|M2|.
The following names are defined:
\begin{itemize}
\item \verb|055.565|: \verb|om1| \item \verb|055.575|: \verb|om2| \item \verb|056.554|: \verb|sa|
\item \verb|056.555|: \verb|sa| \item \verb|057.555|: \verb|ssa| \item \verb|058.554|: \verb|sta|
\item \verb|063.655|: \verb|msm| \item \verb|065.455|: \verb|mm| \item \verb|073.555|: \verb|msf|
\item \verb|075.555|: \verb|mf| \item \verb|083.655|: \verb|mstm| \item \verb|085.455|: \verb|mtm|
\item \verb|093.555|: \verb|msq| \item \verb|093.555|: \verb|msqm| \item \verb|125.755|: \verb|2q1|
\item \verb|127.555|: \verb|sig1| \item \verb|127.555|: \verb|sigma1| \item \verb|135.655|: \verb|q1|
\item \verb|137.455|: \verb|ro1| \item \verb|137.455|: \verb|rho1| \item \verb|145.555|: \verb|o1|
\item \verb|147.555|: \verb|tau1| \item \verb|155.655|: \verb|m1| \item \verb|157.455|: \verb|chi1|
\item \verb|162.556|: \verb|pi1| \item \verb|163.555|: \verb|p1| \item \verb|164.555|: \verb|s1|
\item \verb|165.555|: \verb|k1| \item \verb|166.554|: \verb|psi1| \item \verb|167.555|: \verb|fi1|
\item \verb|167.555|: \verb|phi1| \item \verb|173.655|: \verb|the1| \item \verb|173.655|: \verb|theta1|
\item \verb|175.455|: \verb|j1| \item \verb|183.555|: \verb|so1| \item \verb|185.555|: \verb|oo1|
\item \verb|195.455|: \verb|v1| \item \verb|225.855|: \verb|3n2| \item \verb|227.655|: \verb|eps2|
\item \verb|235.755|: \verb|2n2| \item \verb|237.555|: \verb|mu2| \item \verb|237.555|: \verb|mi2|
\item \verb|245.655|: \verb|n2| \item \verb|247.455|: \verb|nu2| \item \verb|247.455|: \verb|ni2|
\item \verb|253.755|: \verb|gam2| \item \verb|254.556|: \verb|alf2| \item \verb|255.555|: \verb|m2|
\item \verb|256.554|: \verb|bet2| \item \verb|257.555|: \verb|dlt2| \item \verb|263.655|: \verb|la2|
\item \verb|263.655|: \verb|lmb2| \item \verb|263.655|: \verb|lambda2| \item \verb|265.455|: \verb|l2|
\item \verb|271.557|: \verb|2t2| \item \verb|272.556|: \verb|t2| \item \verb|273.555|: \verb|s2|
\item \verb|274.554|: \verb|r2| \item \verb|275.555|: \verb|k2| \item \verb|283.655|: \verb|ksi2|
\item \verb|285.455|: \verb|eta2| \item \verb|355.555|: \verb|m3| \item \verb|381.555|: \verb|t3|
\item \verb|382.555|: \verb|s3| \item \verb|383.555|: \verb|r3| \item \verb|435.755|: \verb|n4|
\item \verb|445.655|: \verb|mn4| \item \verb|455.555|: \verb|m4| \item \verb|473.555|: \verb|ms4|
\item \verb|491.555|: \verb|s4| \item \verb|655.555|: \verb|m6| \item \verb|855.555|: \verb|m8|
\end{itemize}
\clearpage
%==================================
\section{EarthRotation}\label{earthRotationType}
This class realizes the transformation between a terrestrial
reference frame (TRF) and a celestial reference frame (CRF).
\subsection{File}\label{earthRotationType:file}
This class realizes the transformation by interpolation from a file.
This file can be created with \program{EarthOrientationParameterTimeSeries}.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileEOP & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~interpolationDegree & \hfuzz=500pt uint & \hfuzz=500pt for polynomial interpolation\\
\hline
\end{tabularx}
\subsection{Iers2010}
This class realizes the transformation according to the IERS2010 conventions
given by the \emph{International Earth Rotation and Reference Systems Service} (IERS).
A file with the earth orientation parameter is needed (\configFile{inputfileEOP}{earthOrientationParameter}).
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inputfileEOP & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~truncatedNutation & \hfuzz=500pt boolean & \hfuzz=500pt use truncated nutation model (IAU2006B)\\
\hline
\end{tabularx}
\subsection{Iers2010b}\label{earthRotationType:iers2010b}
This class realizes the transformation according to the IERS2010 conventions
given by the \emph{International Earth Rotation and Reference Systems Service} (IERS).
A file with the earth orientation parameter is needed (\configFile{inputfileEOP}{earthOrientationParameter}).
Includes additional high-frequency EOP models (\configFile{inputfileDoodsonEOP}{doodsonEarthOrientationParameter}).
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inputfileEOP & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inputfileDoodsonEOP & \hfuzz=500pt filename & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{Iers2003}
This class realizes the transformation according to IERS2003 conventions
given by the \emph{International Earth Rotation and Reference Systems Service} (IERS).
A file with the earth orientation parameter is needed (\configFile{inputfileEOP}{earthOrientationParameter}).
The following subroutines are used:
\begin{itemize}
\item BPN2000.f,
\item ERA2000.f,
\item pmsdnut.f,
\item POM2000.f,
\item SP2000.f,
\item T2C2000.f,
\item XYS2000A.f
\end{itemize}
from \url{ftp://maia.usno.navy.mil/conv2000/chapter5/} and
\begin{itemize}
\item orthoeop.f
\end{itemize}
from \url{ftp://maia.usno.navy.mil/conv2000/chapter8/}
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileEOP & \hfuzz=500pt filename & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{Iers1996}
Very old.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inputfileEOP & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileNutation & \hfuzz=500pt filename & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{Gmst}
The transformation is realized as rotation about the z-axis.
The angle is given by the Greenwich Mean Sidereal Time (GMST).
\begin{verbatim}
Double Tu0 = (timeUTC.mjdInt()-51544.5)/36525.0;
Double GMST0 = (6.0/24 + 41.0/(24*60) + 50.54841/(24*60*60))
+ (8640184.812866/(24*60*60))*Tu0
+ (0.093104/(24*60*60))*Tu0*Tu0
+ (-6.2e-6/(24*60*60))*Tu0*Tu0*Tu0;
Double r = 1.002737909350795 + 5.9006e-11*Tu0 - 5.9e-15*Tu0*Tu0;
GMST = fmod(2*PI*(GMST0 + r * timeUTC.mjdMod()), 2*PI);
\end{verbatim}
\subsection{Earth Rotation Angle (ERA)}
The transformation is realized as rotation about the z-axis.
The angle is given by the Earth Rotation Angle (ERA) as
\begin{verbatim}
const Time T = timeUT1-mjd2time(J2000);
ERA = fmod(2*PI*(0.7790572732640 + T.mjdMod() + 0.00273781191135448*T.mjd()), 2*PI);
\end{verbatim}
\subsection{Z-Axis}
The transformation is realized as rotation about the z-axis.
You must specify the angle (\config{initialAngle}) at \config{time0} and
the angular velocity (\config{angularVelocity}).
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~initialAngle & \hfuzz=500pt double & \hfuzz=500pt Angle at time0 [rad]\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~angularVelocity & \hfuzz=500pt double & \hfuzz=500pt [rad/s]\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~time0 & \hfuzz=500pt time & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{StarCamera}
This class reads quaternions from an instrument file and interpolates to the given time stamp.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileStarCamera & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~interpolationDegree & \hfuzz=500pt uint & \hfuzz=500pt degree of interpolation polynomial\\
\hline
\end{tabularx}
\subsection{MoonRotation}
This class realizes the transformation between the moon-fixed system
(Principal Axis System (PA) or Mean Earth System (ME))
and the ICRS according to the JPL ephemeris file.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~inputfileEphemerides & \hfuzz=500pt filename & \hfuzz=500pt librations\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~moonfixedSystem & \hfuzz=500pt choice & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~PA & \hfuzz=500pt & \hfuzz=500pt Principal Axis System\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~ME & \hfuzz=500pt & \hfuzz=500pt Mean Earth System\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{Eclipse}\label{eclipseType}
Shadowing of satellites by the Moon and Earth provided as a factor
in $[0,1]$ with 0: full shadow and 1: full sunlight.
\subsection{Conical}
\fig{!hb}{0.8}{eclipseConical}{fig:eclipseConical}{Modelling umbra and penumbra.}
\subsection{SOLAARS}
Earth’s penumbra modeling with Solar radiation pressure with
Oblateness and Lower Atmospheric Absorption, Refraction, and Scattering (SOLAARS).
See Robertson, Robbie. (2015),
Highly Physical Solar Radiation Pressure Modeling During Penumbra Transitions (pp. 67-75).
\clearpage
%==================================
\section{Ephemerides}\label{ephemeridesType}
Ephemerides of Sun, Moon and planets.
The coordinate system is defined as center of \configClass{origin}{planetType}.
\subsection{JPL}\label{ephemeridesType:jpl}
Using \verb|DExxx| ephemerides from NASA Jet Propulsion Laboratory (JPL).
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileEphemerides & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~origin & \hfuzz=500pt \hyperref[planetType]{planet} & \hfuzz=500pt center of coordinate system\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{Forces}\label{forcesType}
This class provides the forces acting on a satellite.
This encompasses \configClass{gravityfield}{gravityfieldType}, \configClass{tides}{tidesType}
and \configClass{miscAccelerations}{miscAccelerationsType}.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~forcesType & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-unbounded.pdf}~gravityfield & \hfuzz=500pt \hyperref[gravityfieldType]{gravityfield} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-unbounded.pdf}~tides & \hfuzz=500pt \hyperref[tidesType]{tides} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-unbounded.pdf}~miscAccelerations & \hfuzz=500pt \hyperref[miscAccelerationsType]{miscAccelerations} & \hfuzz=500pt \\
\hline
\end{tabularx}
\clearpage
%==================================
\section{GnssAntennaDefintionList}\label{gnssAntennaDefintionListType}
Provides a list of GnssAntennaDefinitions as used in \program{GnssAntennaDefinitionCreate}.
\subsection{New}
Creates a new antenna.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~name & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~serial & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~radome & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~comment & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~pattern & \hfuzz=500pt sequence & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~type & \hfuzz=500pt \hyperref[gnssType]{gnssType} & \hfuzz=500pt pattern matching of observation types\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~offsetX & \hfuzz=500pt double & \hfuzz=500pt [m] antenna center offset\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~offsetY & \hfuzz=500pt double & \hfuzz=500pt [m] antenna center offset\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~offsetZ & \hfuzz=500pt double & \hfuzz=500pt [m] antenna center offset\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~deltaAzimuth & \hfuzz=500pt angle & \hfuzz=500pt [degree] step size\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~deltaZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree] step size\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~maxZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree]\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~values & \hfuzz=500pt expression & \hfuzz=500pt [m] expression (zenith, azimuth: variables)\\
\hline
\end{tabularx}
\subsection{FromFile}
Select all or the first antenna from an \file{antenna definition file}{gnssAntennaDefinition}
which matches the wildcards.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileAntennaDefinition & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~name & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~serial & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~radome & \hfuzz=500pt string & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~onlyFirstMatch & \hfuzz=500pt boolean & \hfuzz=500pt otherwise all matching antennas are included\\
\hline
\end{tabularx}
\subsection{FromStationInfo}
Select all antennas from an \file{antenna definition file}{gnssAntennaDefinition}
which are used by a station within a defined time interval.
With \config{specializeAntenna} an individual antenna is created for each different serial number
using the general type specific values from file.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileStationInfo & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~inputfileAntennaDefinition & \hfuzz=500pt filename & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~timeStart & \hfuzz=500pt time & \hfuzz=500pt only antennas used in this time interval\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~timeEnd & \hfuzz=500pt time & \hfuzz=500pt only antennas used in this time interval\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~specializeAntenna & \hfuzz=500pt boolean & \hfuzz=500pt e.g. separate different serial numbers from stationInfo\\
\hline
\end{tabularx}
\subsection{Resample}
The azimuth and elevation dependent antenna center variations (patterns) of all \config{antenna}s
are resampled to a new resolution.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~antenna & \hfuzz=500pt \hyperref[gnssAntennaDefintionListType]{gnssAntennaDefintionList} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~deltaAzimuth & \hfuzz=500pt angle & \hfuzz=500pt [degree] step size, empty: no change\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~deltaZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree] step size, empty: no change\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~maxZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree], empty: no change\\
\hline
\end{tabularx}
\subsection{Transform}
This class can be used to separate general antenna patterns for different \configClass{gnssType}{gnssType}s.
If the \config{antenna}s contain only one pattern for all GPS observations on the L1 frequency (\verb|*1*G**|),
the \config{patternTypes}=\verb|C1*G**| and \verb|L1*G**| create two patterns with the \verb|*1*G**| pattern as template.
The first matching pattern in the \config{antenna} is used as template.
Also new \config{additionalPattern} can be added (e.g. for \verb|*5*G**|).
With \config{addExistingPatterns} all already existing patterns that don't match completely to any of the above are added.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~antenna & \hfuzz=500pt \hyperref[gnssAntennaDefintionListType]{gnssAntennaDefintionList} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-unbounded.pdf}~patternTypes & \hfuzz=500pt \hyperref[gnssType]{gnssType} & \hfuzz=500pt gnssType for each pattern (first match is used)\\
\hfuzz=500pt\includegraphics[width=1em]{element-unbounded.pdf}~additionalPattern & \hfuzz=500pt sequence & \hfuzz=500pt additional new patterns\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~type & \hfuzz=500pt \hyperref[gnssType]{gnssType} & \hfuzz=500pt pattern matching of observation types\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~offsetX & \hfuzz=500pt double & \hfuzz=500pt [m] antenna center offset\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~offsetY & \hfuzz=500pt double & \hfuzz=500pt [m] antenna center offset\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~offsetZ & \hfuzz=500pt double & \hfuzz=500pt [m] antenna center offset\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~deltaAzimuth & \hfuzz=500pt angle & \hfuzz=500pt [degree] step size\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element-mustset.pdf}~deltaZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree] step size\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~maxZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree]\\
\hfuzz=500pt\includegraphics[width=1em]{connector.pdf}\includegraphics[width=1em]{element.pdf}~values & \hfuzz=500pt expression & \hfuzz=500pt [m] expression (zenith, azimuth: variables)\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~addExistingPatterns & \hfuzz=500pt boolean & \hfuzz=500pt add existing patterns that don't match completely any of the above\\
\hline
\end{tabularx}
\subsection{Rename}
Replaces parts of the description of \config{antenna}s.
The star "\verb|*|" leaves this part untouched.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~antenna & \hfuzz=500pt \hyperref[gnssAntennaDefintionListType]{gnssAntennaDefintionList} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~name & \hfuzz=500pt string & \hfuzz=500pt *: leaves this part untouched\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~serial & \hfuzz=500pt string & \hfuzz=500pt *: leaves this part untouched\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~radome & \hfuzz=500pt string & \hfuzz=500pt *: leaves this part untouched\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~comment & \hfuzz=500pt string & \hfuzz=500pt *: leaves this part untouched\\
\hline
\end{tabularx}
\subsection{SetZero}
The antenna center variations (patterns) or offsets
of all \config{antenna}s are set to zero.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~antenna & \hfuzz=500pt \hyperref[gnssAntennaDefintionListType]{gnssAntennaDefintionList} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-unbounded.pdf}~patternTypes & \hfuzz=500pt \hyperref[gnssType]{gnssType} & \hfuzz=500pt only matching patterns, default: all\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~zeroOffset & \hfuzz=500pt boolean & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~zeroPattern & \hfuzz=500pt boolean & \hfuzz=500pt \\
\hline
\end{tabularx}
\subsection{RemoveCenterMean}
The antenna offset and antenna variations (patterns) are inseparable parts of the
antenna model. With \config{removeOffset} an estimated offset is removed from
all selected patterns and added to the offset. With \config{removeMean} an estimated
constant is removed additionally as it cannot be separated from signal biases.
The mean and offset are defined as discretized (\config{deltaAzimuth},
\config{deltaZenith}) integral of the spherical cap from zenith down to \config{maxZenith}.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element-mustset-unbounded.pdf}~antenna & \hfuzz=500pt \hyperref[gnssAntennaDefintionListType]{gnssAntennaDefintionList} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-unbounded.pdf}~patternTypes & \hfuzz=500pt \hyperref[gnssType]{gnssType} & \hfuzz=500pt only matching patterns, default: all\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~removeMean & \hfuzz=500pt boolean & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~removeOffset & \hfuzz=500pt boolean & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~deltaAzimuth & \hfuzz=500pt angle & \hfuzz=500pt [degree] sampling of pattern to estimate center/constant\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~deltaZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree] sampling of pattern to estimate center/constant\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~maxZenith & \hfuzz=500pt angle & \hfuzz=500pt [degree] sampling of pattern to estimate center/constant\\
\hline
\end{tabularx}
\clearpage
%==================================
\section{GnssParametrization}\label{gnssParametrizationType}
This class defines the models and parameters of the linearized observation equations
for all phase and code measurements (see \program{GnssProcessing})
\begin{equation}\label{gnssParametrizationType:model}
\M l - \M f(\M x_0) = \left.\frac{\partial \M f(\M x)}{\partial \M x}\right|_{\M x_0} \Delta\M x + \M\epsilon,
\end{equation}
where the left side is the observation vector minus the effects computed from the a priori models.
After each least squares adjustment
(see \configClass{GnssProcessing:processingStep:estimate}{gnssProcessingStepType:estimate})
the a priori parameters are updated
\begin{equation}\label{gnssParametrizationType:update}
\M x_0 := \M x_0 + \Delta\hat{\M x}.
\end{equation}
The vector $\M x_0$ can be written with
\configClass{GnssProcessing:processingStep:writeAprioriSolution}{gnssProcessingStepType:writeAprioriSolution}.
Any \config{outputfiles} defined in the parametrizations are written with
\configClass{GnssProcessing:processingStep:writeResults}{gnssProcessingStepType:writeResults}.
Each parametrization (and possible constraint equations) has a \config{name} which enables
activating/deactivating the estimation of subsets of $\Delta\M x$ with
\configClass{GnssProcessing:processingStep:selectParametrizations}{gnssProcessingStepType:selectParametrizations}.
The a priori model $\M f(\M x_0)$ is unaffected and is always reduced.
The model for the different observation types can be described as
\begin{equation}\label{gnssParametrizationType:gnssFullModel}
\begin{split}
f[\tau\nu a]_r^s(\M x) &= \text{geometry}(\M r_r^s) + \text{clock}^s(t) + \text{clock}_r(t) \\
&+ \text{ionosphere}([\tau\nu],t,\M r_r^s) + \text{troposphere}(t,\M r_r^s) \\
&+ \text{antenna}[\tau\nu a]^s + \text{antenna}[\tau\nu a]_r \\
&+ \text{bias}[\tau\nu a]^s + \text{bias}[\tau\nu a]_r
+ \lambda[L\nu] N[L\nu a]_r^s + \text{other}(\ldots) + \epsilon[\tau\nu a]_r^s
\end{split}
\end{equation}
The notation $[\tau\nu a]_r^s$ describes the
attribution to a signal type $\tau$ (i.e., C or L), frequency $\nu$,
signal attribute $a$ (e.g., C, W, Q, X), transmitting satellite $s$, and observing receiver $r$.
It follows the \href{https://files.igs.org/pub/data/format/rinex305.pdf}{RINEX 3 definition},
see \reference{GnssType}{gnssType}.
See also \program{GnssProcessing}.
\subsection{IonosphereSTEC}\label{gnssParametrizationType:ionosphereSTEC}
The influence of the ionosphere is modelled by a STEC parameter (slant total electron content)
in terms of $[TECU]$ between each transmitter and receiver at each epoch. These parameters are pre-eliminated
from the observation equations before accumulating the normal equations.
This is similar to using the ionosphere-free linear combination as observations
but only one STEC parameter is needed for an arbitrary number of observation types.
The influence on the code and phase observation is modeled as
\begin{equation}\label{gnssParametrizationType:IonosphereSTEC:STEC}
\begin{split}
\text{ionosphere}([C\nu], STEC) &= \frac{40.3}{f_{\nu}^2}STEC + \frac{7525\M b^T\M k}{f_{\nu}^3}STEC + \frac{r}{f_{\nu}^4}STEC^2 \\
\text{ionosphere}([L\nu], STEC) &= -\frac{40.3}{f_{\nu}^2}STEC - \frac{7525\M b^T\M k}{2f_{\nu}^3}STEC - \frac{r}{3f_{\nu}^4}STEC^2 + \text{bending}(E)STEC^2
\end{split}
\end{equation}
The second order term depends on the \configClass{magnetosphere}{magnetosphereType} $\M b$
and the direction of the signal $\M k$.
If further information about the ionosphere is available
(in the form of a prior model or as additional parametrizations
such as \configClass{parametrization:ionosphereMap}{gnssParametrizationType:ionosphereMap} or
\configClass{parametrization:ionosphereVTEC}{gnssParametrizationType:ionosphereVTEC}) the STEC
parameters describe local and short-term scintillations. The STEC parameters are estimated
as additions to the model and it is advised to constrain them towards zero
with a standard deviation of \config{sigmaSTEC}.
\keepXColumns
\begin{tabularx}{\textwidth}{N T A}
\hline
Name & Type & Annotation\\
\hline
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~name & \hfuzz=500pt string & \hfuzz=500pt used for parameter selection\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~apply2ndOrderCorrection & \hfuzz=500pt boolean & \hfuzz=500pt apply ionospheric correction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~apply3rdOrderCorrection & \hfuzz=500pt boolean & \hfuzz=500pt apply ionospheric correction\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~applyBendingCorrection & \hfuzz=500pt boolean & \hfuzz=500pt apply ionospheric correction\\
\hfuzz=500pt\includegraphics[width=1em]{element-mustset.pdf}~magnetosphere & \hfuzz=500pt \hyperref[magnetosphereType]{magnetosphere} & \hfuzz=500pt \\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~nameConstraint & \hfuzz=500pt string & \hfuzz=500pt used for parameter selection\\
\hfuzz=500pt\includegraphics[width=1em]{element.pdf}~sigmaSTEC & \hfuzz=500pt expression & \hfuzz=500pt expr. for sigma [TECU] for STEC constraint, variable E (elevation) available\\
\hline
\end{tabularx}
\subsection{IonosphereVTEC}\label{gnssParametrizationType:ionosphereVTEC}
The influence of the ionosphere is modelled by a VTEC parameter (vertical total electron content)
in terms of $[TECU]$ for every selected receiver at each epoch. Optionally, VTEC gradients in the
North (x) and East (y) direction can be estimated via \configClass{gradient}{parametrizationTemporalType}.
The slant TEC is computed based on the VTEC and the optional North and East gradients $\Delta V_x$ and $\Delta V_y$
using the elevation-dependent Modified Single-Layer Model (MSLM) mapping function
\begin{equation}\label{gnssParametrizationType:IonosphereVTEC:STEC}
STEC = \frac{VTEC + \cos(A) \Delta V_x + \sin(A) \Delta V_y}{\cos z'}
\qquad\text{with}\qquad
\sin z'= \left(\frac{R}{R+H}\right)\sin\left(\alpha(\pi/2-E)\right)
\end{equation}
inserted into eq.~\eqref{gnssParametrizationType:IonosphereSTEC:STEC},
where $A$ is the azimuth angle and $E$ is the elevation angle.
The result is written as a \file{time series file}{instrument} at epochs with observations
depending on \configClass{GnssProcessing:processingStep:selectEpochs}{gnssProcessingStepType:selectEpochs}.
This class provides a simplified model of the ionosphere for single receivers
and enables the separation of the TEC and signal biases, meaning
\configClass{parametrization:tecBiases}{gnssParametrizationType:tecBiases} becomes estimable.
Local and short-term scintillations should be considered by adding loosely constrained
\configClass{parametrization:ionosphereSTEC}{gnssParametrizationType:ionosphereSTEC}.
The \file{parameter names}{parameterName} are
\begin{itemize}
\item \verb|:VTEC::